"""simple docstring"""
def lowercase__ ( snake_case_ :list ):
__UpperCAmelCase = len(snake_case_ )
for i in range(1 , snake_case_ ):
__UpperCAmelCase = collection[i]
__UpperCAmelCase = 0
__UpperCAmelCase = i - 1
while low <= high:
__UpperCAmelCase = (low + high) // 2
if val < collection[mid]:
__UpperCAmelCase = mid - 1
else:
__UpperCAmelCase = mid + 1
for j in range(snake_case_ , snake_case_ , -1 ):
__UpperCAmelCase = collection[j - 1]
__UpperCAmelCase = val
return collection
if __name__ == "__main__":
_lowercase : Union[str, Any] = input('Enter numbers separated by a comma:\n').strip()
_lowercase : str = [int(item) for item in user_input.split(',')]
print(binary_insertion_sort(unsorted))
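# Example session (illustrative input, not from the original file):
#   Enter numbers separated by a comma:
#   5,3,8,1,9
#   [1, 3, 5, 8, 9]
# Equivalently: binary_insertion_sort([5, 3, 8, 1, 9]) == [1, 3, 5, 8, 9]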
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'ResNetForImageClassification',
'ResNetModel',
'ResNetPreTrainedModel',
'ResNetBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFResNetForImageClassification',
'TFResNetModel',
'TFResNetPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
'FlaxResNetForImageClassification',
'FlaxResNetModel',
'FlaxResNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
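# How the lazy pattern above resolves attributes, as a simplified sketch (this
# is NOT the actual transformers implementation; all names below are
# illustrative only):
#
#     import importlib
#     import types
#
#     class MiniLazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             self._import_structure = import_structure
#
#         def __getattr__(self, attr):
#             for submodule, symbols in self._import_structure.items():
#                 if attr in symbols:
#                     module = importlib.import_module(f".{submodule}", self.__name__)
#                     return getattr(module, attr)
#             raise AttributeError(attr)
#
# Replacing the entry in sys.modules means `from ... import ResNetModel` only
# triggers the torch-dependent import on first access.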
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(self, parent):
        # The configuration below is hardcoded for this test suite.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setters
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)

    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())

    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"

        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_dir = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_dir)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)

    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)

    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")

    def run_common_tests(self):
        self.create_and_test_config_common_properties()
        self.create_and_test_config_to_json_string()
        self.create_and_test_config_to_json_file()
        self.create_and_test_config_from_and_save_pretrained()
        self.create_and_test_config_from_and_save_pretrained_subfolder()
        self.create_and_test_config_with_num_labels()
        self.check_config_can_be_init_without_params()
        self.check_config_arguments_init()
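# Typical wiring inside a model test suite, as the ConvBert tests above do:
#   self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
#   self.config_tester.run_common_tests()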
def longest_palindromic_substring(input_string: str) -> str:
    """Find the longest palindromic substring in O(n) time (Manacher's algorithm)."""
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_input_string for range(0, length - 1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append the last character
    new_input_string += input_string[-1]

    # we will store the start and end of the previously furthest-ending
    # palindromic substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of the palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_input_string find the corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this palindrome end after the previously explored end (that is r)?
        # if yes, update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
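# Example (illustrative): longest_palindromic_substring("abbbaba") returns "abbba".
# The "a|b|a" interleaving makes every palindrome odd-length in the transformed
# string, so a single center-expansion table covers odd and even palindromes alike.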
"""simple docstring"""
# Precomputed descending timestep schedules. Distinct placeholder names are used
# below (the original names were lost) so the assignments do not shadow one another.
timesteps_a = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
timesteps_b = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
timesteps_c = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
timesteps_d = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
timesteps_e = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
timesteps_f = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
timesteps_g = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
timesteps_h = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt, height=512, width=512, num_inference_steps=50, guidance_scale=7.5, negative_prompt=None, num_images_per_prompt=1, eta=0.0, generator=None, latents=None, output_type="pil", return_dict=True, callback=None, callback_steps=1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import AddedToken
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/esm2_t6_8M_UR50D': 'https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt',
'facebook/esm2_t12_35M_UR50D': 'https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/esm2_t6_8M_UR50D": 1024,
    "facebook/esm2_t12_35M_UR50D": 1024,
}
def load_vocab_file(vocab_file):
    with open(vocab_file, "r") as f:
        lines = f.read().splitlines()
    return [l.strip() for l in lines]
class EsmTokenizer(PreTrainedTokenizer):
    """Constructs an ESM tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, unk_token="<unk>", cls_token="<cls>", pad_token="<pad>", mask_token="<mask>", eos_token="<eos>", **kwargs):
        super().__init__(**kwargs)
        self.all_tokens = load_vocab_file(vocab_file)
        self._id_to_token = dict(enumerate(self.all_tokens))
        self._token_to_id = {tok: ind for ind, tok in enumerate(self.all_tokens)}
        self.unk_token = unk_token
        self.cls_token = cls_token
        self.pad_token = pad_token
        self.mask_token = mask_token
        self.eos_token = eos_token
        self.unique_no_split_tokens = self.all_tokens
        self._create_trie(self.unique_no_split_tokens)
    def _convert_id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)

    def _convert_token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def _tokenize(self, text, **kwargs):
        return text.split()

    def get_vocab_size(self, with_added_tokens=False):
        return len(self._id_to_token)

    def get_vocab(self):
        return {token: i for i, token in enumerate(self.all_tokens)}

    def token_to_id(self, token: str) -> int:
        return self._token_to_id.get(token, self._token_to_id.get(self.unk_token))

    def id_to_token(self, index: int) -> str:
        return self._id_to_token.get(index, self.unk_token)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.eos_token_id]  # No sep token in ESM vocabulary
        if token_ids_1 is None:
            if self.eos_token_id is None:
                return cls + token_ids_0
            else:
                return cls + token_ids_0 + sep
        elif self.eos_token_id is None:
            raise ValueError("Cannot tokenize multiple sequences when EOS token is not set!")
        return cls + token_ids_0 + sep + token_ids_1 + sep  # Multiple inputs always have an EOS token

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if token in self.all_special_ids else 0 for token in token_ids_0]
        mask = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            mask += [0] * len(token_ids_1) + [1]
        return mask
    def save_vocabulary(self, save_directory, filename_prefix):
        vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + "vocab.txt")
        with open(vocab_file, "w") as f:
            f.write("\n".join(self.all_tokens))
        return (vocab_file,)

    @property
    def vocab_size(self) -> int:
        return self.get_vocab_size(with_added_tokens=False)

    def _add_tokens(self, new_tokens, special_tokens=False) -> int:
        return super()._add_tokens(new_tokens, special_tokens=special_tokens)
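# For reference, the special-token layout built above (token names, not ids):
#   single sequence:  <cls> tok_1 ... tok_n <eos>
#   sequence pair:    <cls> seq_a <eos> seq_b <eos>
# ESM has no dedicated separator token, so <eos> doubles as the separator.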
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
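# The behavior these cases pin down: a file listing counts as safetensors
# compatible when every framework weight file (*.bin, per requested variant)
# has a matching *.safetensors counterpart (falling back to non-variant files
# when a variant is requested); removing any single counterpart flips the result.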
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    """Configuration class for VisualBERT models."""

    model_type = "visual_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
"tokenization_canine": ["CanineTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
"CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
"CanineForMultipleChoice",
"CanineForQuestionAnswering",
"CanineForSequenceClassification",
"CanineForTokenClassification",
"CanineLayer",
"CanineModel",
"CaninePreTrainedModel",
"load_tf_weights_in_canine",
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations
from typing import Any
class CircularQueueLinkedList:
    """Circular FIFO queue backed by a doubly linked list of fixed capacity."""

    def __init__(self, initial_capacity: int = 6) -> None:
        self.front: Node | None = None
        self.rear: Node | None = None
        self.create_linked_list(initial_capacity)

    def create_linked_list(self, initial_capacity: int) -> None:
        current_node = Node()
        self.front = current_node
        self.rear = current_node
        previous_node = current_node
        for _ in range(1, initial_capacity):
            current_node = Node()
            previous_node.next = current_node
            current_node.prev = previous_node
            previous_node = current_node
        # Close the ring: the last node points back to the first.
        previous_node.next = self.front
        self.front.prev = previous_node

    def is_empty(self) -> bool:
        return (
            self.front == self.rear
            and self.front is not None
            and self.front.data is None
        )

    def first(self) -> Any | None:
        self.check_can_perform_operation()
        return self.front.data if self.front else None

    def enqueue(self, data: Any) -> None:
        if self.rear is None:
            return

        self.check_is_full()
        if not self.is_empty():
            self.rear = self.rear.next
        if self.rear:
            self.rear.data = data

    def dequeue(self) -> Any:
        self.check_can_perform_operation()
        if self.rear is None or self.front is None:
            return None

        if self.front == self.rear:
            data = self.front.data
            self.front.data = None
            return data

        old_front = self.front
        self.front = old_front.next
        data = old_front.data
        old_front.data = None
        return data

    def check_can_perform_operation(self) -> None:
        if self.is_empty():
            raise Exception("Empty Queue")

    def check_is_full(self) -> None:
        if self.rear and self.rear.next == self.front:
            raise Exception("Full Queue")


class Node:
    def __init__(self) -> None:
        self.data: Any | None = None
        self.next: Node | None = None
        self.prev: Node | None = None


if __name__ == "__main__":
    import doctest

    doctest.testmod()
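# Illustrative session:
#   queue = CircularQueueLinkedList(initial_capacity=3)
#   queue.enqueue("a")
#   queue.enqueue("b")
#   queue.first()    # -> "a"
#   queue.dequeue()  # -> "a"
# Enqueueing past the fixed capacity raises Exception("Full Queue").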
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # PY37 is an assumption; the exact black target version was lost in this dump.
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    """Configuration class for PEGASUS models."""

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=0, scale_embedding=False, pad_token_id=0, eos_token_id=1, forced_eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
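# Illustrative: PegasusConfig(d_model=1024, encoder_attention_heads=16) exposes
# config.hidden_size == 1024 and config.num_attention_heads == 16 through the
# attribute_map and the properties above.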
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class AltCLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and an XLM-RoBERTa tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
def __call__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Optional[Any]:
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
_lowerCAmelCase =self.tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if images is not None:
_lowerCAmelCase =self.image_processor(__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase )
if text is not None and images is not None:
_lowerCAmelCase =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCAmelCase ) , tensor_type=__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> List[Any]:
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def _lowerCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ) -> Optional[int]:
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
@property
def _lowerCAmelCase ( self ) -> int:
_lowerCAmelCase =self.tokenizer.model_input_names
_lowerCAmelCase =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 341 | 1 |
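An illustrative call pattern for the processor above, kept in comments because the checkpoint name is an assumption and may not exist.

# processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")  # assumed checkpoint
# batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
# batch then contains input_ids, attention_mask and pixel_values.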
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or 'WarmUp') as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {'WarmUp': WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name='adam_weight_decay_rate')

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({'weight_decay_rate': self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError('The accumulator should be called first to initialize the gradients')
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ] )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"""Expected {len(self._gradients)} gradients, but got {len(gradients)}""")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 67 |
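A small eager-mode sanity check of the WarmUp schedule defined above: the rate ramps linearly for warmup_steps, then hands off to the wrapped decay schedule. The numbers are illustrative only.

decay = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=1e-3, decay_steps=90, end_learning_rate=0.0
)
schedule = WarmUp(initial_learning_rate=1e-3, decay_schedule_fn=decay, warmup_steps=10)
for step in (1, 5, 10, 50, 100):
    # rises to 1e-3 by step 10, then follows the polynomial decay
    print(step, float(schedule(step)))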
B64_CHARSET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'


def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"""not '{encoded_data.__class__.__name__}'"""
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 341 | 0 |
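A quick round-trip check of the two functions above against the standard library:

import base64

sample = b"Hello, World!"
encoded = base64_encode(sample)
assert encoded == base64.b64encode(sample)
assert base64_decode(encoded) == sample
print(encoded)  # b'SGVsbG8sIFdvcmxkIQ=='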
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE__ = XGLMTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def A_ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ = '<pad>'
SCREAMING_SNAKE_CASE__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def A_ ( self : Any ):
SCREAMING_SNAKE_CASE__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(len(UpperCAmelCase_ ) , 1008 )
def A_ ( self : Union[str, Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def A_ ( self : str ):
SCREAMING_SNAKE_CASE__ = XGLMTokenizer(UpperCAmelCase_ , keep_accents=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
SCREAMING_SNAKE_CASE__ = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
    def big_tokenizer(self):
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def A_ ( self : Optional[int] ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(UpperCAmelCase_ , f.name )
SCREAMING_SNAKE_CASE__ = XGLMTokenizer(f.name , keep_accents=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = pickle.dumps(UpperCAmelCase_ )
pickle.loads(UpperCAmelCase_ )
def A_ ( self : int ):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE__ = tokenizer.encode(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = rust_tokenizer.encode(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@slow
def A_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = 'Hello World!'
SCREAMING_SNAKE_CASE__ = [2, 31227, 4447, 35]
self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) )
@slow
def A_ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
SCREAMING_SNAKE_CASE__ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(UpperCAmelCase_ , self.big_tokenizer.encode(UpperCAmelCase_ ) )
@slow
def A_ ( self : str ):
# fmt: off
SCREAMING_SNAKE_CASE__ = {
'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='facebook/xglm-564M' , padding=UpperCAmelCase_ , )
| 169 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {'num examples': SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({'text': datasets.Value('string'), 'numbers': datasets.Value('float32')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, 'dataset.arrow'), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-cased', use_fast=True)

        def tokenize(examples):
            return tokenizer(examples['text'])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type='numpy'):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='pandas'):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='torch', columns='numbers'):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type='tensorflow', columns='numbers'):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

        with open(RESULTS_FILE_PATH, 'wb') as f:
            f.write(json.dumps(times).encode('utf-8'))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 169 | 1 |
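The benchmark above relies on a `get_duration` decorator imported from a local `utils` module; a minimal stand-in could look like the following (an assumption about its behaviour, not the shipped implementation):

import functools
import time


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # the decorated call returns its duration in seconds

    return wrapper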
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class a ( unittest.TestCase ):
def __UpperCAmelCase ( self ) -> Tuple:
_a = tempfile.mkdtemp()
_a = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
_a = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_a = os.path.join(self.tmpdirname , __magic_name__ )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self , **__magic_name__ ) -> List[str]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def __UpperCAmelCase ( self , **__magic_name__ ) -> Optional[int]:
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__magic_name__ )
def __UpperCAmelCase ( self , **__magic_name__ ) -> Any:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__magic_name__ )
def __UpperCAmelCase ( self ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self ) -> str:
_a = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_a = [Image.fromarray(np.moveaxis(__magic_name__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCAmelCase ( self ) -> int:
_a = self.get_tokenizer()
_a = self.get_rust_tokenizer()
_a = self.get_image_processor()
_a = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_slow.save_pretrained(self.tmpdirname )
_a = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__magic_name__ )
_a = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
processor_fast.save_pretrained(self.tmpdirname )
_a = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __magic_name__ )
self.assertIsInstance(processor_fast.tokenizer , __magic_name__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __magic_name__ )
self.assertIsInstance(processor_fast.image_processor , __magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[int]:
_a = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_a = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_a = self.get_image_processor(do_normalize=__magic_name__ , padding_value=1.0 )
_a = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__magic_name__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __magic_name__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __magic_name__ )
def __UpperCAmelCase ( self ) -> int:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_a = self.prepare_image_inputs()
_a = image_processor(__magic_name__ , return_tensors='np' )
_a = processor(images=__magic_name__ , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_a = 'lower newer'
_a = processor(text=__magic_name__ )
_a = tokenizer(__magic_name__ , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_a = 'lower newer'
_a = self.prepare_image_inputs()
_a = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__magic_name__ ):
processor()
def __UpperCAmelCase ( self ) -> List[str]:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_a = processor.batch_decode(__magic_name__ )
_a = tokenizer.batch_decode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = self.get_image_processor()
_a = self.get_tokenizer()
_a = AlignProcessor(tokenizer=__magic_name__ , image_processor=__magic_name__ )
_a = 'lower newer'
_a = self.prepare_image_inputs()
_a = processor(text=__magic_name__ , images=__magic_name__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 168 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Helsinki-NLP/opus-mt-en-de": "https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json",
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    model_type = """marian"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}

    def __init__(
        self,
        vocab_size=58_101,
        decoder_vocab_size=None,
        max_position_embeddings=10_24,
        encoder_layers=12,
        encoder_ffn_dim=40_96,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=40_96,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=10_24,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=58_100,
        scale_embedding=False,
        pad_token_id=58_100,
        eos_token_id=0,
        forced_eos_token_id=0,
        share_encoder_decoder_embeddings=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_a = {0: 'batch'}
_a = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_a = {0: 'batch', 1: 'decoder_sequence'}
_a = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_a = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_a , _a = self.num_layers
for i in range(__magic_name__ ):
_a = {0: 'batch', 2: 'past_sequence + sequence'}
_a = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_a = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_a = super().outputs
else:
_a = super(__magic_name__ , self ).outputs
if self.use_past:
_a , _a = self.num_layers
for i in range(__magic_name__ ):
_a = {0: 'batch', 2: 'past_sequence + sequence'}
_a = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_encoder_and_decoder(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_encoder_and_decoder(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
_a = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**__magic_name__ , **__magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_a , _a = common_inputs['input_ids'].shape
_a = common_inputs['decoder_input_ids'].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(__magic_name__ , __magic_name__ )] , dim=1 )
_a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(__magic_name__ , __magic_name__ )
_a = max(__magic_name__ , __magic_name__ ) - min_num_layers
_a = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(__magic_name__ ):
common_inputs["past_key_values"].append(
(
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
torch.zeros(__magic_name__ ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(__magic_name__ , __magic_name__ ):
common_inputs["past_key_values"].append((torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) )
return common_inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
_a = self._generate_dummy_inputs_for_encoder_and_decoder(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_a , _a = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs['attention_mask'].dtype
_a = torch.cat(
[common_inputs['attention_mask'], torch.ones(__magic_name__ , __magic_name__ , dtype=__magic_name__ )] , dim=1 )
_a = [
(torch.zeros(__magic_name__ ), torch.zeros(__magic_name__ )) for _ in range(__magic_name__ )
]
return common_inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(__magic_name__ )
_a = compute_effective_axis_dimension(
__magic_name__ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__magic_name__ )
# Generate dummy inputs according to compute batch and sequence
_a = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(__magic_name__ , return_tensors=__magic_name__ ) )
return common_inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = -1 , __magic_name__ = -1 , __magic_name__ = False , __magic_name__ = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
else:
_a = self._generate_dummy_inputs_for_causal_lm(
__magic_name__ , batch_size=__magic_name__ , seq_length=__magic_name__ , is_pair=__magic_name__ , framework=__magic_name__ )
return common_inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Tuple:
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
else:
_a = super(__magic_name__ , self )._flatten_past_key_values_(
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
@property
def __UpperCAmelCase ( self ) -> float:
return 1e-4
| 168 | 1 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
class FlaxBlenderbotModelTester:
'''simple docstring'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=1_3 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=False , _UpperCamelCase=9_9 , _UpperCamelCase=1_6 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=4 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=3_2 , _UpperCamelCase=2 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=0.02 , ) -> List[str]:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : List[Any] = batch_size
UpperCAmelCase_ : Optional[Any] = seq_length
UpperCAmelCase_ : Optional[Any] = is_training
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : Tuple = intermediate_size
UpperCAmelCase_ : Optional[int] = hidden_act
UpperCAmelCase_ : str = hidden_dropout_prob
UpperCAmelCase_ : Any = attention_probs_dropout_prob
UpperCAmelCase_ : Optional[Any] = max_position_embeddings
UpperCAmelCase_ : List[Any] = eos_token_id
UpperCAmelCase_ : Any = pad_token_id
UpperCAmelCase_ : Union[str, Any] = bos_token_id
UpperCAmelCase_ : List[str] = initializer_range
def __UpperCAmelCase ( self ) -> Any:
UpperCAmelCase_ : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
UpperCAmelCase_ : List[Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
UpperCAmelCase_ : Any = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase_ : int = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(_A , _A , _A )
return config, inputs_dict
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Dict = self.prepare_config_and_inputs()
return config, inputs_dict
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]:
UpperCAmelCase_ : Optional[Any] = 2_0
UpperCAmelCase_ : List[str] = model_class_name(_A )
UpperCAmelCase_ : List[Any] = model.encode(inputs_dict['input_ids'] )
UpperCAmelCase_ : Any = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCAmelCase_ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase_ : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
UpperCAmelCase_ : Optional[int] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : Any = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase_ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCAmelCase_ : List[Any] = model.decode(
decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
UpperCAmelCase_ : Optional[Any] = model.decode(_A , _A )
UpperCAmelCase_ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __UpperCAmelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Tuple:
UpperCAmelCase_ : Dict = 2_0
UpperCAmelCase_ : Dict = model_class_name(_A )
UpperCAmelCase_ : Union[str, Any] = model.encode(inputs_dict['input_ids'] )
UpperCAmelCase_ : List[str] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
UpperCAmelCase_ : Dict = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
UpperCAmelCase_ : Union[str, Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ : Tuple = model.decode(
decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
UpperCAmelCase_ : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
UpperCAmelCase_ : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
UpperCAmelCase_ : Dict = model.decode(_A , _A , decoder_attention_mask=_A )
UpperCAmelCase_ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
_snake_case : List[str] = 9_9
def __UpperCAmelCase ( self ) -> Optional[int]:
UpperCAmelCase_ : List[str] = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
UpperCAmelCase_ : Tuple = input_ids.shape[0]
UpperCAmelCase_ : Any = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : List[Any] = self._get_config_and_data()
UpperCAmelCase_ : str = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase_ : int = lm_model(input_ids=_A )
UpperCAmelCase_ : Tuple = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _A )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[Any] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
UpperCAmelCase_ : Optional[Any] = FlaxBlenderbotForConditionalGeneration(_A )
UpperCAmelCase_ : Any = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
UpperCAmelCase_ : Optional[int] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
UpperCAmelCase_ : Optional[int] = lm_model(input_ids=_A , decoder_input_ids=_A )
UpperCAmelCase_ : Optional[Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _A )
def __UpperCAmelCase ( self ) -> Dict:
UpperCAmelCase_ : Union[str, Any] = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
UpperCAmelCase_ : Dict = shift_tokens_right(_A , 1 , 2 )
UpperCAmelCase_ : Tuple = np.equal(_A , 1 ).astype(np.floataa ).sum()
UpperCAmelCase_ : List[str] = np.equal(_A , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_A , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
'''simple docstring'''
_snake_case : List[str] = True
_snake_case : int = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_snake_case : Optional[int] = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Optional[Any] = FlaxBlenderbotModelTester(self )
def __UpperCAmelCase ( self ) -> Tuple:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_A , _A , _A )
def __UpperCAmelCase ( self ) -> List[str]:
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
def __UpperCAmelCase ( self ) -> str:
UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Dict = self._prepare_for_class(_A , _A )
UpperCAmelCase_ : Dict = model_class(_A )
@jax.jit
def encode_jitted(_UpperCamelCase , _UpperCamelCase=None , **_UpperCamelCase ):
return model.encode(input_ids=_A , attention_mask=_A )
with self.subTest('JIT Enabled' ):
UpperCAmelCase_ : Dict = encode_jitted(**_A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase_ : int = encode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : Tuple = model_class(_A )
UpperCAmelCase_ : int = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
UpperCAmelCase_ : Any = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return model.decode(
decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )
with self.subTest('JIT Enabled' ):
UpperCAmelCase_ : Union[str, Any] = decode_jitted(**_A ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
UpperCAmelCase_ : Dict = decode_jitted(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) )
for jitted_output, output in zip(_A , _A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __UpperCAmelCase ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ : int = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ : Any = model(_A )
self.assertIsNotNone(_A )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def __UpperCAmelCase ( self ) -> int:
UpperCAmelCase_ : Optional[int] = {'num_beams': 1, 'early_stopping': True, 'min_length': 1_5, 'max_length': 2_5}
UpperCAmelCase_ : int = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
UpperCAmelCase_ : Any = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_A )
UpperCAmelCase_ : Optional[Any] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
UpperCAmelCase_ : Optional[int] = ['Sam']
UpperCAmelCase_ : Optional[Any] = tokenizer(_A , return_tensors='jax' )
UpperCAmelCase_ : int = model.generate(**_A , **_A )
UpperCAmelCase_ : Tuple = 'Sam is a great name. It means "sun" in Gaelic.'
UpperCAmelCase_ : Union[str, Any] = tokenizer.batch_decode(_A , **_A )
assert generated_txt[0].strip() == tgt_text
| 350 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Returns the maximum sum of k consecutive elements of the array."""
    if len(array) < k or k < 0:
        raise ValueError('Invalid Input')
    current_sum = sum(array[:k])
    max_sum = current_sum
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(F'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
| 145 | 0 |
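Worked example for the sliding window above: for [1, 4, 2, 10, 2] with k=2 the window sums are 5, 6, 12 and 12, so the maximum is 12.

print(max_sum_in_array([1, 4, 2, 10, 2], 2))  # 12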
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    if number < 0:
        raise ValueError('the value of input must not be negative')
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    def do_benchmark(number: int) -> None:
        setup = 'import __main__ as z'
        print(f"""Benchmark when {number = }:""")
        print(f"""{get_set_bits_count_using_modulo_operator(number) = }""")
        timing = timeit('z.get_set_bits_count_using_modulo_operator(25)', setup=setup)
        print(f"""timeit() runs in {timing} seconds""")
        print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(number) = }""")
        timing = timeit(
            'z.get_set_bits_count_using_brian_kernighans_algorithm(25)',
            setup=setup,
        )
        print(f"""timeit() runs in {timing} seconds""")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 284 |
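Why `number &= number - 1` works: subtracting 1 flips the lowest set bit and every zero below it, so the AND clears exactly one set bit per loop iteration.

# 0b1100 & 0b1011 == 0b1000, then 0b1000 & 0b0111 == 0 -> two iterations, two set bits
assert get_set_bits_count_using_brian_kernighans_algorithm(0b1100) == 2
assert get_set_bits_count_using_modulo_operator(0b1100) == 2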
def lucas_lehmer_test(p: int) -> bool:
    """Checks whether the Mersenne number 2**p - 1 is prime, for p an odd prime."""
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0


if __name__ == "__main__":
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
| 284 | 1 |
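The test above decides primality of the Mersenne number 2**p - 1 for an odd prime p: 2**7 - 1 = 127 is prime, while 2**11 - 1 = 2047 = 23 * 89 is not.

assert lucas_lehmer_test(7) is True
assert lucas_lehmer_test(11) is False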
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'glpn'

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 1_60, 2_56],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 358 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}


class ViTMAEConfig(PretrainedConfig):
    model_type = 'vit_mae'

    def __init__(
        self,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=2_24,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=5_12,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=20_48,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 330 | 0 |
"""simple docstring"""
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
__A = [0] * len(lowerCamelCase_ )
for i in range(1 , len(lowerCamelCase_ ) ):
# use last results for better performance - dynamic programming
__A = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
__A = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
__A = j
return prefix_result
def lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
return max(prefix_function(lowerCamelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 266 |
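A small worked example makes the failure function concrete (illustrative only, not from the original file):

# Usage sketch for prefix_function / longest_prefix defined above.
print(prefix_function("aabcdaabc"))  # [0, 1, 0, 0, 0, 1, 2, 3, 4]
print(longest_prefix("aabcdaabc"))   # 4, since "aabc" is both a proper prefix and a suffix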
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["labels"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 129 | 0 |
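A hedged end-to-end sketch of how such a processor is typically driven; the checkpoint name and image path are assumptions, not taken from this file:

# Hypothetical usage of the Pix2StructProcessor above.
from transformers import Pix2StructProcessor
from PIL import Image

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")  # assumed checkpoint
inputs = processor(images=Image.open("page.png"), text="A caption", return_tensors="pt")  # assumed local image
print(inputs.keys())  # flattened patches plus the tokenized text fields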
"""simple docstring"""
from __future__ import annotations
def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Tuple , __lowerCamelCase : Union[str, Any] ) -> Any: # noqa: E741
while r - l > 1:
_snake_case = (l + r) // 2
if v[m] >= key:
_snake_case = m
else:
_snake_case = m # noqa: E741
return r
def _UpperCAmelCase ( __lowerCamelCase : list[int] ) -> int:
if len(lowercase_ ) == 0:
return 0
_snake_case = [0] * len(lowercase_ )
_snake_case = 1
_snake_case = v[0]
for i in range(1 , len(lowercase_ ) ):
if v[i] < tail[0]:
_snake_case = v[i]
elif v[i] > tail[length - 1]:
_snake_case = v[i]
length += 1
else:
_snake_case = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356 |
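For reference, an illustrative call (the classic example sequence; not in the original file):

# Usage sketch for longest_increasing_subsequence_length defined above.
print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))  # 6, e.g. [2, 3, 7, 8, 10, 13]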
"""simple docstring"""
from __future__ import annotations
def _UpperCAmelCase ( __lowerCamelCase : list[int] , __lowerCamelCase : int ) -> list[list[int]]:
_snake_case = []
_snake_case = []
_snake_case = 0
_snake_case = sum(__lowerCamelCase )
create_state_space_tree(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return result
def _UpperCAmelCase ( __lowerCamelCase : list[int] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : list[int] , __lowerCamelCase : list[list[int]] , __lowerCamelCase : int , ) -> None:
if sum(__lowerCamelCase ) > max_sum or (remaining_nums_sum + sum(__lowerCamelCase )) < max_sum:
return
if sum(__lowerCamelCase ) == max_sum:
result.append(__lowerCamelCase )
return
for index in range(__lowerCamelCase , len(__lowerCamelCase ) ):
create_state_space_tree(
__lowerCamelCase , __lowerCamelCase , index + 1 , [*path, nums[index]] , __lowerCamelCase , remaining_nums_sum - nums[index] , )
UpperCAmelCase__ = [3, 34, 4, 12, 5, 2]
UpperCAmelCase__ = 9
UpperCAmelCase__ = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 40 | 0 |
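Tracing the driver above by hand: only two subsets of [3, 34, 4, 12, 5, 2] sum to 9, and the depth-first search finds them in this order:

# Illustrative check of the backtracking search above.
assert generate_sum_of_subsets_soln([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]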
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path: Union[str, os.PathLike]):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff: bool = True):
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
| 67 |
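A hedged example of constructing this config for 4-bit loading; the values are illustrative, not prescribed by the file:

# Usage sketch, assuming the BitsAndBytesConfig class above with torch installed.
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="bfloat16",
    bnb_4bit_use_double_quant=True,
)
print(quant_config.quantization_method())  # "nf4"
print(quant_config.is_quantizable())       # True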
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file() -> str:
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir) -> dict:
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available() -> bool:
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write an Accelerate config shared by every test in this class.
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
        if is_cuda_and_apex_available():
            testargs.append("--fp16")
        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 203 | 0 |
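For orientation, a hedged way to run one of these tests locally; the module path is an assumption, not stated in this snippet:

# Hypothetical invocation; adjust the path to wherever this test module lives.
# python -m pytest -rA -k "test_run_glue_no_trainer" examples/pytorch/test_accelerate_examples.py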
"""simple docstring"""
def snake_case_(_UpperCamelCase = 1_000 ) -> int:
"""simple docstring"""
_snake_case = 2**power
_snake_case = 0
while n:
_snake_case, _snake_case = r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 357 |
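A quick sanity check of the digit-sum routine (Project Euler style):

# Usage sketch: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.
assert solution(15) == 26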
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CLIPTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                text = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = "xa\u0303y" + " " + "x\xe3y"
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    "\u0009",  # (horizontal tab, '\t')
                    "\u000B",  # (vertical tab)
                    "\u000C",  # (form feed)
                    "\u0020",  # (space, ' ')
                    "\u200E",  # (left-to-right mark)
                    "\u200F",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    "\u000A",  # (line feed, '\n')
                    "\r\n",  # (carriage return and line feed, '\r\n')
                    "\u000D",  # (carriage return, '\r')
                    "\r",  # (carriage return, '\r')
                    "\u000D",  # (carriage return, '\r')
                    "\u2028",  # (line separator)
                    "\u2029",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, use_fast=True)
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error is raised when the user tries to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer")
        self.assertTrue(
            context.exception.args[0].startswith(
                "The `backend_tokenizer` provided does not match the expected format."
            )
        )

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 278 | 0 |
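To make the fixture concrete, an illustrative trace of the toy BPE vocabulary above (not part of the test file):

# With the merges ["l o", "lo w</w>", "e r</w>"], the string "lower newer"
# segments as ["lo", "w", "er</w>", "n", "e", "w", "er</w>"], i.e. ids [10, 2, 16, 9, 3, 2, 16].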
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", f"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.reduction.weight", f"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.weight", f"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((f"backbone.0.body.layers.{i}.downsample.norm.bias", f"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", f"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", f"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", f"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", f"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.weight", f"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.value_proj.bias", f"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.weight", f"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.self_attn.output_proj.bias", f"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.weight", f"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", f"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", f"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", f"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", f"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", f"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", f"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", f"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.weight", f"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.weight", f"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm2.bias", f"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 349 |
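A hedged command-line sketch for the converter above; the script filename is an assumption:

# Hypothetical shell usage of convert_deta_checkpoint via the argparse block above:
# python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#     --pytorch_dump_folder_path ./deta-swin-large --push_to_hub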
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
args = parser.parse_args()
is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 349 | 1 |
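# A minimal sketch of invoking the converter above directly from Python rather than the CLI.
# All paths are hypothetical stand-ins; a real run needs fairseq installed and the
# checkpoint/dictionary files on disk.
convert_wavaveca_checkpoint(
    checkpoint_path="/tmp/wav2vec_small_960h.pt",  # fairseq checkpoint (hypothetical)
    pytorch_dump_folder_path="/tmp/wav2vec2-hf",  # output directory (hypothetical)
    config_path=None,
    dict_path="/tmp/dict.ltr.txt",  # fairseq dictionary (hypothetical)
    is_finetuned=True,
    is_seq_class=False,
)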
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    """Convert a TensorFlow XLNet checkpoint into a PyTorch model and save it."""
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 114 |
'''simple docstring'''
def all_characters_unique(input_str: str) -> bool:
    """Return True if no character occurs more than once in ``input_str``."""
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)

        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 114 | 1 |
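# A minimal usage sketch for the uniqueness check above. Python's arbitrary-precision
# integers let the bitmap cover any Unicode code point, at the cost of wide shifts.
assert all_characters_unique("abcdef") is True
assert all_characters_unique("abcdea") is False  # 'a' repeats
assert all_characters_unique("") is True  # vacuously unique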
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1_024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool token embeddings over the attended positions, then project.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
| 10 |
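# A minimal forward-pass sketch for the text tower above, randomly initialized and used
# only for shape checking. transformerDimSize is set to 768 here so it matches the default
# XLM-RoBERTa hidden size feeding the projection layer; no pretrained weights are loaded.
import torch

config = MCLIPConfig(transformerDimSize=768, imageDimSize=640)
model = MultilingualCLIP(config)
input_ids = torch.randint(0, config.vocab_size, (2, 16))
attention_mask = torch.ones_like(input_ids)
projected, token_embeddings = model(input_ids, attention_mask)
print(projected.shape)  # torch.Size([2, 640])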
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    """Count the hollow square laminae that can be formed with up to ``limit`` tiles."""
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        # The hole must share the parity of the outer square.
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
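# A sanity check of the closed-form counting above against a direct enumeration of
# (outer, hole) pairs whose tile count outer**2 - hole**2 stays within the limit.
def brute_force(limit: int) -> int:
    count = 0
    for outer in range(3, limit):
        if outer * outer - (outer - 2) ** 2 > limit:  # even the thinnest lamina is too big
            break
        for hole in range(outer - 2, 0, -2):  # hole shares the outer square's parity
            if outer * outer - hole * hole > limit:
                break
            count += 1
    return count


assert solution(100) == brute_force(100) == 41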
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = LongformerTokenizer
_UpperCAmelCase : Dict = True
_UpperCAmelCase : List[str] = LongformerTokenizerFast
_UpperCAmelCase : str = True
def __lowerCamelCase ( self : Optional[int] ) ->Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__ : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowerCamelCase__ : Optional[Any] = dict(zip(A , range(len(A ) ) ) )
lowerCamelCase__ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowerCamelCase__ : Tuple = {'''unk_token''': '''<unk>'''}
lowerCamelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A ) )
def __lowerCamelCase ( self : Optional[int] , **A : Dict ) ->Tuple:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A )
def __lowerCamelCase ( self : Union[str, Any] , **A : Any ) ->Dict:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A )
def __lowerCamelCase ( self : int , A : Optional[Any] ) ->Tuple:
lowerCamelCase__ : int = '''lower newer'''
lowerCamelCase__ : Tuple = '''lower newer'''
return input_text, output_text
def __lowerCamelCase ( self : List[str] ) ->Dict:
lowerCamelCase__ : List[str] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase__ : Union[str, Any] = '''lower newer'''
lowerCamelCase__ : Optional[int] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowerCamelCase__ : Union[str, Any] = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A , A )
lowerCamelCase__ : Optional[Any] = tokens + [tokenizer.unk_token]
lowerCamelCase__ : Optional[int] = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def __lowerCamelCase ( self : List[str] ) ->Optional[Any]:
lowerCamelCase__ : Union[str, Any] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=A ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=A ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def __lowerCamelCase ( self : Dict ) ->Dict:
lowerCamelCase__ : List[str] = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
lowerCamelCase__ : Union[str, Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=A )
lowerCamelCase__ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A )
lowerCamelCase__ : Optional[Any] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=A , add_prefix_space=A )
lowerCamelCase__ : Optional[int] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=A , add_prefix_space=A )
lowerCamelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(A )
lowerCamelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __lowerCamelCase ( self : Any ) ->List[Any]:
lowerCamelCase__ : Optional[int] = self.get_tokenizer()
lowerCamelCase__ : Optional[int] = '''Encode this sequence.'''
lowerCamelCase__ : Optional[int] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowerCamelCase__ : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
lowerCamelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A , A )
lowerCamelCase__ : str = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
lowerCamelCase__ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A , A )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowerCamelCase__ : Optional[int] = tokenizer.encode(A , add_special_tokens=A )
lowerCamelCase__ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A , A )
# Testing spaces after special tokens
lowerCamelCase__ : Optional[int] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space
lowerCamelCase__ : Dict = tokenizer.convert_tokens_to_ids(A )
lowerCamelCase__ : Optional[int] = '''Encode <mask> sequence'''
lowerCamelCase__ : str = '''Encode <mask>sequence'''
lowerCamelCase__ : Optional[Any] = tokenizer.encode(A )
lowerCamelCase__ : List[str] = encoded.index(A )
lowerCamelCase__ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A , A )
lowerCamelCase__ : Optional[Any] = tokenizer.encode(A )
lowerCamelCase__ : Optional[int] = encoded.index(A )
lowerCamelCase__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A , A )
def __lowerCamelCase ( self : str ) ->str:
pass
def __lowerCamelCase ( self : Dict ) ->Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
lowerCamelCase__ : str = self.tokenizer_class.from_pretrained(A , **A )
lowerCamelCase__ : List[str] = '''A, <mask> AllenNLP sentence.'''
lowerCamelCase__ : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
lowerCamelCase__ : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowerCamelCase__ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowerCamelCase__ : List[str] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def __lowerCamelCase ( self : List[Any] ) ->Optional[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCamelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A )
lowerCamelCase__ : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCamelCase__ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , A )
self.assertEqual(post_processor_state['''add_prefix_space'''] , A )
self.assertEqual(post_processor_state['''trim_offsets'''] , A )
def __lowerCamelCase ( self : int ) ->Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase__ : int = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowerCamelCase__ : Any = F"{text_of_1_token} {text_of_1_token}"
lowerCamelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
lowerCamelCase__ : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
lowerCamelCase__ : int = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
lowerCamelCase__ : Optional[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
lowerCamelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
lowerCamelCase__ : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
lowerCamelCase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
lowerCamelCase__ : Tuple = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
lowerCamelCase__ : List[str] = F" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCamelCase__ : str = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
lowerCamelCase__ : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
lowerCamelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
lowerCamelCase__ : int = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
lowerCamelCase__ : Tuple = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
lowerCamelCase__ : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
| 367 |
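# For reference, a tiny standalone illustration of the byte-level BPE merges that the toy
# vocab/merges fixture above encodes ("\u0120" marks a leading space). Applying the merge
# rules greedily in file order is a simplification of rank-based BPE, but it gives the
# same result for this fixture.
word = list("\u0120lower")  # ['Ġ', 'l', 'o', 'w', 'e', 'r']
merges = [("\u0120", "l"), ("\u0120l", "o"), ("\u0120lo", "w"), ("e", "r")]
for first, second in merges:
    i = 0
    while i < len(word) - 1:
        if word[i] == first and word[i + 1] == second:
            word[i : i + 2] = [first + second]
        else:
            i += 1
print(word)  # ['Ġlow', 'er'] -- matching the 'Ġlow' and 'er' entries in the toy vocab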
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self : Optional[int] , A : Union[str, Any] , A : Any=7 , A : Optional[int]=3 , A : Tuple=3_0 , A : List[Any]=4_0_0 , A : str=True , A : Optional[int]=None , A : Tuple=True , A : Union[str, Any]=1 / 2_5_5 , A : Any=True , A : Optional[int]=[0.5, 0.5, 0.5] , A : Optional[int]=[0.5, 0.5, 0.5] , A : str=True , ) ->List[Any]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCamelCase__ : Union[str, Any] = size if size is not None else {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3}
lowerCamelCase__ : List[str] = parent
lowerCamelCase__ : Union[str, Any] = batch_size
lowerCamelCase__ : int = num_channels
lowerCamelCase__ : Optional[Any] = min_resolution
lowerCamelCase__ : List[Any] = max_resolution
lowerCamelCase__ : int = do_resize
lowerCamelCase__ : List[str] = size
lowerCamelCase__ : Union[str, Any] = do_rescale
lowerCamelCase__ : Optional[int] = rescale_factor
lowerCamelCase__ : List[str] = do_normalize
lowerCamelCase__ : Tuple = image_mean
lowerCamelCase__ : str = image_std
lowerCamelCase__ : List[Any] = do_pad
def __lowerCamelCase ( self : List[str] ) ->Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self : Tuple , A : int , A : List[str]=False ) ->int:
if not batched:
lowerCamelCase__ : Union[str, Any] = image_inputs[0]
if isinstance(A , Image.Image ):
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = image.size
else:
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase__ : List[Any] = int(self.size['''shortest_edge'''] * h / w )
lowerCamelCase__ : List[Any] = self.size['''shortest_edge''']
elif w > h:
lowerCamelCase__ : List[Any] = self.size['''shortest_edge''']
lowerCamelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h )
else:
lowerCamelCase__ : List[Any] = self.size['''shortest_edge''']
lowerCamelCase__ : Any = self.size['''shortest_edge''']
else:
lowerCamelCase__ : Optional[Any] = []
for image in image_inputs:
lowerCamelCase__ , lowerCamelCase__ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase__ : Dict = max(A , key=lambda A : item[0] )[0]
lowerCamelCase__ : Dict = max(A , key=lambda A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self : Optional[int] ) ->Dict:
lowerCamelCase__ : Optional[int] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self : Dict ) ->Any:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , '''image_mean''' ) )
self.assertTrue(hasattr(A , '''image_std''' ) )
self.assertTrue(hasattr(A , '''do_normalize''' ) )
self.assertTrue(hasattr(A , '''do_rescale''' ) )
self.assertTrue(hasattr(A , '''rescale_factor''' ) )
self.assertTrue(hasattr(A , '''do_resize''' ) )
self.assertTrue(hasattr(A , '''size''' ) )
self.assertTrue(hasattr(A , '''do_pad''' ) )
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
lowerCamelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8, '''longest_edge''': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , A )
lowerCamelCase__ : str = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=A )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2, '''longest_edge''': 8_4} )
self.assertEqual(image_processor.do_pad , A )
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[Any]:
pass
def __lowerCamelCase ( self : str ) ->Optional[Any]:
# Initialize image_processing
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowerCamelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ : int = self.image_processor_tester.get_expected_values(A , batched=A )
lowerCamelCase__ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : List[str] ) ->List[str]:
# Initialize image_processing
lowerCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowerCamelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : List[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self : Any ) ->Any:
# Initialize image_processing
lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowerCamelCase__ : str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
lowerCamelCase__ , lowerCamelCase__ : str = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self : Tuple ) ->List[Any]:
# prepare image and target
lowerCamelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowerCamelCase__ : Union[str, Any] = json.loads(f.read() )
lowerCamelCase__ : List[str] = {'''image_id''': 3_9_7_6_9, '''annotations''': target}
# encode them
lowerCamelCase__ : Optional[int] = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50''' )
lowerCamelCase__ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase__ : Optional[int] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase__ : Any = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase__ : Dict = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase__ : str = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase__ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase__ : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowerCamelCase__ : str = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase__ : str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def __lowerCamelCase ( self : Optional[Any] ) ->List[str]:
# prepare image, target and masks_path
lowerCamelCase__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowerCamelCase__ : Optional[Any] = json.loads(f.read() )
lowerCamelCase__ : Union[str, Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 3_9_7_6_9, '''segments_info''': target}
lowerCamelCase__ : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowerCamelCase__ : Any = DetrImageProcessor.from_pretrained('''facebook/detr-resnet-50-panoptic''' )
lowerCamelCase__ : Tuple = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowerCamelCase__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowerCamelCase__ : str = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowerCamelCase__ : int = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowerCamelCase__ : Optional[Any] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowerCamelCase__ : Optional[Any] = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowerCamelCase__ : int = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowerCamelCase__ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowerCamelCase__ : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowerCamelCase__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowerCamelCase__ : List[Any] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowerCamelCase__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 265 | 0 |
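# The get_expected_values helper above mirrors DETR's aspect-ratio-preserving resize.
# A self-contained sketch of that rule (longest_edge clamping omitted for brevity):
def expected_resize(height, width, shortest_edge=18):
    # Scale so the shorter side equals shortest_edge, keeping the aspect ratio.
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


print(expected_resize(480, 640))  # (18, 24)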
values = {
0: '''0''',
1: '''1''',
2: '''2''',
3: '''3''',
4: '''4''',
5: '''5''',
6: '''6''',
7: '''7''',
8: '''8''',
9: '''9''',
10: '''a''',
11: '''b''',
12: '''c''',
13: '''d''',
14: '''e''',
15: '''f''',
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Convert an integer-valued number to its hexadecimal string, e.g. 5 -> '0x5'."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
| 119 |
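# Usage sketch for the converter above; results agree with Python's built-in hex().
assert decimal_to_hexadecimal(5) == "0x5" == hex(5)
assert decimal_to_hexadecimal(15) == "0xf" == hex(15)
assert decimal_to_hexadecimal(-256) == "-0x100" == hex(-256)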
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--mobilebert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained MobileBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 119 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 178 |
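# A minimal usage sketch for the processor above. OCR is disabled so words and boxes can
# be supplied by hand; the blank image is a stand-in for a real document scan.
from PIL import Image
from transformers import LayoutLMv3ImageProcessor, LayoutLMv3Processor, LayoutLMv3TokenizerFast

image_processor = LayoutLMv3ImageProcessor(apply_ocr=False)
tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
processor = LayoutLMv3Processor(image_processor=image_processor, tokenizer=tokenizer)

image = Image.new("RGB", (224, 224), "white")  # stand-in document page
words = ["hello", "world"]
boxes = [[10, 10, 50, 20], [60, 10, 110, 20]]  # one 0-1000 normalized box per word
encoding = processor(image, words, boxes=boxes, return_tensors="pt")
print(sorted(encoding.keys()))  # typically ['attention_mask', 'bbox', 'input_ids', 'pixel_values']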
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the ``_import_structure`` objects defined and the ``TYPE_CHECKING``
    objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and list any mismatched objects."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the repo and raise an error if the two halves define different objects."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """List all the submodules of the transformers package."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'''convert_pytorch_checkpoint_to_tf2''',
'''modeling_flax_pytorch_utils''',
'''models.esm.openfold_utils''',
]
def check_submodules():
    """Check every submodule is registered in the main init of transformers."""
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 178 | 1 |
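# A standalone sketch of the backend-detection idea behind find_backend above; the
# sample lines below are made up for illustration.
import re

backend_re = re.compile(r"is\_([a-z_]*)_available")
for sample in ("    if not is_torch_available():", "    else:"):
    backends = sorted(backend_re.findall(sample))
    print("_and_".join(backends) if backends else None)  # prints "torch", then None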
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
'''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
'''kssteven/ibert-roberta-large-mnli''': (
'''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 138 |
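# A small sketch constructing the config above and inspecting the ONNX input axes;
# quant_mode is the I-BERT-specific switch for integer-only inference.
config = IBertConfig(quant_mode=True)
print(config.model_type)  # ibert
print(config.quant_mode)  # True

onnx_config = IBertOnnxConfig(config)
print(onnx_config.inputs)  # OrderedDict with batch/sequence dynamic axes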
import math
def fx(x: float, a: float) -> float:
    """The function whose root we seek: f(x) = x^2 - a."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """Derivative of f: f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Return a starting point >= sqrt(a) by repeated squaring from 2."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9_999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with the Newton-Raphson iteration."""
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
| 138 | 1 |
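# Usage sketch comparing the Newton-Raphson root above against math.sqrt.
import math

for a in (2, 10, 1_337):
    approx = square_root_iterative(a)
    assert math.isclose(approx, math.sqrt(a), rel_tol=1e-9), (a, approx)
print(square_root_iterative(4))  # 2.0 (to within the tolerance)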
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1600, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed model parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999_999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 350 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def _snake_case ( _snake_case : List[str] , _snake_case : Dict , _snake_case : List[str] , _snake_case : Union[str, Any]=True , _snake_case : Any=True , _snake_case : List[str]=None , _snake_case : List[Any]=False ) -> int:
'''simple docstring'''
_A , _A = model.config.num_hidden_layers, model.config.num_attention_heads
_A = torch.zeros(_snake_case , _snake_case ).to(args.device )
_A = torch.zeros(_snake_case , _snake_case ).to(args.device )
if head_mask is None:
_A = torch.ones(_snake_case , _snake_case ).to(args.device )
head_mask.requires_grad_(requires_grad=_snake_case )
# If attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_A = None
_A = 0.0
_A = 0.0
for step, inputs in enumerate(tqdm(_snake_case , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_A = tuple(t.to(args.device ) for t in inputs )
((_A) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_A = model(_snake_case , labels=_snake_case , head_mask=_snake_case )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_A , _A , _A = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_snake_case ):
_A = entropy(attn.detach() , _snake_case )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_snake_case ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_A = 2
_A = torch.pow(torch.pow(_snake_case , _snake_case ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
_A = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(_snake_case )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(_snake_case )
logger.info('Head ranked by importance scores' )
_A = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_A = torch.arange(
head_importance.numel() , device=args.device )
_A = head_ranks.view_as(_snake_case )
print_ad_tensor(_snake_case )
return attn_entropy, head_importance, total_loss
def _snake_case ( _snake_case : Any , _snake_case : Tuple , _snake_case : List[Any] ) -> List[str]:
'''simple docstring'''
_A , _A , _A = compute_heads_importance(_snake_case , _snake_case , _snake_case , compute_entropy=_snake_case )
_A = 1 / loss # instead of a downstream score, use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , _snake_case , original_score * args.masking_threshold )
_A = torch.ones_like(_snake_case )
_A = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_A = original_score
while current_score >= original_score * args.masking_threshold:
_A = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_A = float('Inf' )
_A = head_importance.view(-1 ).sort()[1]
if len(_snake_case ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_A = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_A = new_head_mask.view(-1 )
_A = 0.0
_A = new_head_mask.view_as(_snake_case )
_A = new_head_mask.clone().detach()
print_ad_tensor(_snake_case )
# Compute metric and head importance again
_A , _A , _A = compute_heads_importance(
_snake_case , _snake_case , _snake_case , compute_entropy=_snake_case , head_mask=_snake_case )
_A = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percent)' , _snake_case , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(_snake_case )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Any , _snake_case : Dict , _snake_case : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
_A = datetime.now()
_A , _A , _A = compute_heads_importance(
_snake_case , _snake_case , _snake_case , compute_entropy=_snake_case , compute_importance=_snake_case , head_mask=_snake_case )
_A = 1 / loss
_A = datetime.now() - before_time
_A = sum(p.numel() for p in model.parameters() )
_A = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_snake_case ) )
}
for k, v in heads_to_prune.items():
if isinstance(_snake_case , _snake_case ):
_A = [
v,
]
assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_snake_case )
_A = sum(p.numel() for p in model.parameters() )
_A = datetime.now()
_A , _A , _A = compute_heads_importance(
_snake_case , _snake_case , _snake_case , compute_entropy=_snake_case , compute_importance=_snake_case , head_mask=_snake_case , actually_pruned=_snake_case , )
_A = 1 / loss
_A = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , _snake_case , _snake_case , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , _snake_case , _snake_case )
logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 1_00 )
save_model(_snake_case , args.output_dir )
def _snake_case ( ) -> Dict:
'''simple docstring'''
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=_snake_case , type=_snake_case , required=_snake_case , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=_snake_case , type=_snake_case , required=_snake_case , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=_snake_case , type=_snake_case , required=_snake_case , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=_snake_case , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=_snake_case , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=_snake_case , type=_snake_case , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=_snake_case , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance scores by layer' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=_snake_case , help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=_snake_case , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=_snake_case , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=_snake_case , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=_snake_case , help='Batch size.' )
parser.add_argument('--seed' , type=_snake_case , default=42 )
parser.add_argument('--local_rank' , type=_snake_case , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=_snake_case , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=_snake_case , default='' , help='Can be used for distant debugging.' )
_A = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_snake_case )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_A = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_A = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_A = torch.device('cuda' , args.local_rank )
_A = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_A = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_A = nn.parallel.DistributedDataParallel(
_snake_case , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_snake_case )
elif args.n_gpu > 1:
_A = nn.DataParallel(_snake_case )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_snake_case )
torch.save(_snake_case , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , _snake_case )
# Prepare dataset
_A = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_A = (torch.from_numpy(_snake_case ),)
_A = TensorDataset(*_snake_case )
_A = RandomSampler(_snake_case )
_A = DataLoader(_snake_case , sampler=_snake_case , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_snake_case , _snake_case , _snake_case )
# Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_A = mask_heads(_snake_case , _snake_case , _snake_case )
prune_heads(_snake_case , _snake_case , _snake_case , _snake_case )
if __name__ == "__main__":
main()
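# Invocation sketch for this script (the file name and paths are placeholders, not taken
# from the original; the flags mirror the argparse definitions in main() above):
#
#   python run_gpt2_head_pruning.py \
#       --data_dir ./tokenized_ids.txt \
#       --model_name_or_path gpt2 \
#       --output_dir ./pruned_model \
#       --try_masking --masking_threshold 0.9 --masking_amount 0.1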
| 271 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_snake_case = {
"configuration_owlvit": [
"OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"OwlViTConfig",
"OwlViTOnnxConfig",
"OwlViTTextConfig",
"OwlViTVisionConfig",
],
"processing_owlvit": ["OwlViTProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["OwlViTFeatureExtractor"]
_snake_case = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OwlViTModel",
"OwlViTPreTrainedModel",
"OwlViTTextModel",
"OwlViTVisionModel",
"OwlViTForObjectDetection",
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
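# With the lazy module above, a typical import resolves each symbol on first attribute
# access. A usage sketch (the checkpoint id is the public OWL-ViT one, not from this file):
#
#   from transformers import OwlViTProcessor, OwlViTForObjectDetection
#   processor = OwlViTProcessor.from_pretrained('google/owlvit-base-patch32')
#   model = OwlViTForObjectDetection.from_pretrained('google/owlvit-base-patch32')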
| 283 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
_a : str= logging.get_logger(__name__)
_a : str= {"vocab_file": "spiece.model"}
_a : Tuple= {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
}
}
_a : int= {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
# Segments (not really needed)
_a : Optional[int]= 0
_a : str= 1
_a : Tuple= 2
_a : str= 3
_a : Optional[Any]= 4
class UpperCamelCase ( lowercase ):
UpperCAmelCase : Union[str, Any] = VOCAB_FILES_NAMES
UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : str = """left"""
def __init__(self : List[Any] , _A : List[str] , _A : int=False , _A : Tuple=True , _A : Optional[Any]=False , _A : List[Any]="<s>" , _A : Dict="</s>" , _A : str="<unk>" , _A : Optional[Any]="<sep>" , _A : Optional[Any]="<pad>" , _A : Optional[Any]="<cls>" , _A : Dict="<mask>" , _A : List[Any]=["<eop>", "<eod>"] , _A : Optional[Dict[str, Any]] = None , **_A : List[str] , ) -> None:
# Mask token behaves like a normal word, i.e. it includes the space before it
__snake_case : str = AddedToken(_A , lstrip=_A , rstrip=_A) if isinstance(_A , _A) else mask_token
__snake_case : Dict = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__snake_case : Tuple = 3
__snake_case : Optional[int] = do_lower_case
__snake_case : Union[str, Any] = remove_space
__snake_case : Dict = keep_accents
__snake_case : str = vocab_file
__snake_case : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(_A)
@property
def _lowercase (self : Dict) -> List[str]:
return len(self.sp_model)
def _lowercase (self : Dict) -> Union[str, Any]:
__snake_case : str = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__(self : Union[str, Any]) -> List[str]:
__snake_case : Optional[Any] = self.__dict__.copy()
__snake_case : Union[str, Any] = None
return state
def __setstate__(self : Union[str, Any] , _A : Optional[Any]) -> str:
__snake_case : Optional[int] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
__snake_case : List[Any] = {}
__snake_case : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def _lowercase (self : Any , _A : Tuple) -> List[str]:
if self.remove_space:
__snake_case : List[Any] = ' '.join(inputs.strip().split())
else:
__snake_case : Tuple = inputs
__snake_case : int = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
__snake_case : str = unicodedata.normalize('NFKD' , _A)
__snake_case : Tuple = ''.join([c for c in outputs if not unicodedata.combining(_A)])
if self.do_lower_case:
__snake_case : Union[str, Any] = outputs.lower()
return outputs
def _lowercase (self : List[Any] , _A : str) -> List[str]:
__snake_case : int = self.preprocess_text(_A)
__snake_case : Dict = self.sp_model.encode(_A , out_type=_A)
__snake_case : Union[str, Any] = []
for piece in pieces:
if len(_A) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
__snake_case : List[str] = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
__snake_case : List[str] = cur_pieces[1:]
else:
__snake_case : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(_A)
else:
new_pieces.append(_A)
return new_pieces
def _lowercase (self : Union[str, Any] , _A : Union[str, Any]) -> Any:
return self.sp_model.PieceToId(_A)
def _lowercase (self : Tuple , _A : str) -> Optional[int]:
return self.sp_model.IdToPiece(_A)
def _lowercase (self : List[str] , _A : Dict) -> List[Any]:
__snake_case : str = ''.join(_A).replace(SPIECE_UNDERLINE , ' ').strip()
return out_string
def _lowercase (self : Dict , _A : List[int] , _A : bool = False , _A : bool = None , _A : bool = True , **_A : str , ) -> str:
__snake_case : Tuple = kwargs.pop('use_source_tokenizer' , _A)
__snake_case : Tuple = self.convert_ids_to_tokens(_A , skip_special_tokens=_A)
# To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__snake_case : List[str] = []
__snake_case : str = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A))
__snake_case : List[Any] = []
sub_texts.append(_A)
else:
current_sub_text.append(_A)
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_A))
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__snake_case : Optional[int] = ''.join(_A)
__snake_case : str = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__snake_case : str = self.clean_up_tokenization(_A)
return clean_text
else:
return text
def _lowercase (self : Dict , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : int = [self.sep_token_id]
__snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _lowercase (self : List[str] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A)
if token_ids_a is not None:
return ([0] * len(_A)) + [1] + ([0] * len(_A)) + [1, 1]
return ([0] * len(_A)) + [1, 1]
def _lowercase (self : Dict , _A : List[int] , _A : Optional[List[int]] = None) -> List[int]:
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[int] = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def _lowercase (self : Tuple , _A : str , _A : Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(_A):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
__snake_case : str = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , _A)
elif not os.path.isfile(self.vocab_file):
with open(_A , 'wb') as fi:
__snake_case : Tuple = self.sp_model.serialized_model_proto()
fi.write(_A)
return (out_vocab_file,)
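# Round-trip sketch for the tokenizer above (assuming the obfuscated class is the stock
# XLNet SentencePiece tokenizer; the checkpoint id comes from the vocab map in this file):
#
#   tok = XLNetTokenizer.from_pretrained('xlnet-base-cased')
#   ids = tok('Hello world')['input_ids']   # ends with <sep> <cls>, per the
#                                           # sequence-building logic above
#   text = tok.decode(ids, skip_special_tokens=True)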
| 172 | 0 |
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_UpperCAmelCase : int = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase ( _SCREAMING_SNAKE_CASE ):
def __init__( self , *A_ , A_=None , A_=None , A_=None , **A_ ) -> int:
"""simple docstring"""
super().__init__(*A_ , **A_ )
UpperCamelCase = eval_examples
UpperCamelCase = post_process_function
UpperCamelCase = quant_trainer_args
UpperCamelCase = 128 # default number of calibration samples
def __UpperCamelCase ( self , A_=None ) -> str:
"""simple docstring"""
if calib_dataset is None and self.calib_dataset is None:
raise ValueError('Trainer: calibration requires a calib_dataset.' )
UpperCamelCase = calib_dataset if calib_dataset is not None else self.calib_dataset
UpperCamelCase = self._remove_unused_columns(A_ , description='Calibration' )
return DataLoader(
A_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=A_ , )
def __UpperCamelCase ( self , A_=None ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.train_dataset if calib_dataset is None else calib_dataset
UpperCamelCase = self.get_calib_dataloader(A_ )
UpperCamelCase = self.model
quant_trainer.configure_model(A_ , self.quant_trainer_args , calib=A_ )
model.eval()
quant_trainer.enable_calibration(A_ )
logger.info('***** Running calibration *****' )
logger.info(F''' Num examples = {self.calib_num}''' )
logger.info(F''' Batch size = {calib_dataloader.batch_size}''' )
for step, inputs in enumerate(A_ ):
# Prediction step
UpperCamelCase , UpperCamelCase , UpperCamelCase = self.prediction_step(A_ , A_ , prediction_loss_only=A_ )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(A_ , self.quant_trainer_args )
UpperCamelCase = model
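# Calibration usage sketch (the class and method names above are obfuscated; this assumes
# the loop above is exposed as calibrate() on a Trainer subclass, as in the qdqbert
# research example this file resembles):
#
#   trainer = QuestionAnsweringTrainer(model=model, args=training_args,
#                                      quant_trainer_args=quant_args)
#   trainer.calibrate(calib_dataset)   # run the static-quantization calibration loop
#   metrics = trainer.evaluate()       # evaluation then sees the calibrated model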
def __UpperCamelCase ( self , A_=None , A_=None , A_=None , A_ = "eval" ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCamelCase = self.get_eval_dataloader(A_ )
UpperCamelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase = self.compute_metrics
UpperCamelCase = None
UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCamelCase = eval_loop(
A_ , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , )
finally:
UpperCamelCase = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
UpperCamelCase = self.post_process_function(A_ , A_ , output.predictions )
UpperCamelCase = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
UpperCamelCase = metrics.pop(A_ )
self.log(A_ )
else:
UpperCamelCase = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCamelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , A_ )
return metrics
def __UpperCamelCase ( self , A_ , A_ , A_=None , A_ = "test" ) -> List[str]:
"""simple docstring"""
UpperCamelCase = self.get_test_dataloader(A_ )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCamelCase = self.compute_metrics
UpperCamelCase = None
UpperCamelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCamelCase = eval_loop(
A_ , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A_ , )
finally:
UpperCamelCase = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCamelCase = self.post_process_function(A_ , A_ , output.predictions , 'predict' )
UpperCamelCase = self.compute_metrics(A_ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
UpperCamelCase = metrics.pop(A_ )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A_ )
def __UpperCamelCase ( self , A_="./" ) -> str:
"""simple docstring"""
UpperCamelCase = self.eval_dataset
UpperCamelCase = self.get_eval_dataloader(A_ )
UpperCamelCase = next(iter(A_ ) )
# saving device - to make it consistent
UpperCamelCase = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
# convert to tuple
UpperCamelCase = tuple(v.to(A_ ) for k, v in batch.items() )
logger.info('Converting model to be onnx compatible' )
from pytorch_quantization.nn import TensorQuantizer
UpperCamelCase = True
UpperCamelCase = self.model.to(A_ )
model.eval()
model.float()
UpperCamelCase = model.module if hasattr(A_ , 'module' ) else model
quant_trainer.configure_model(A_ , self.quant_trainer_args )
UpperCamelCase = os.path.join(A_ , 'model.onnx' )
logger.info(F'''exporting model to {output_model_file}''' )
UpperCamelCase = {0: 'batch_size', 1: 'seq_len'}
torch.onnx.export(
A_ , A_ , A_ , export_params=A_ , opset_version=13 , do_constant_folding=A_ , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
'input_ids': axes,
'attention_mask': axes,
'token_type_ids': axes,
'output_start_logits': axes,
'output_end_logits': axes,
} , verbose=A_ , )
logger.info('onnx export finished' )
| 362 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
__lowercase : List[Any] = KandinskyVaaControlnetImgaImgPipeline
__lowercase : Optional[Any] = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowercase : Any = ["image_embeds", "negative_image_embeds", "image", "hint"]
__lowercase : Union[str, Any] = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
__lowercase : Optional[int] = False
@property
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
return 32
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.time_input_dim
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def __UpperCamelCase ( self ) -> str:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = {
'in_channels': 8,
# out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def __UpperCamelCase ( self ) -> int:
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = self.dummy_unet
UpperCamelCase = self.dummy_movq
UpperCamelCase = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.0_0085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
UpperCamelCase = DDIMScheduler(**A_ )
UpperCamelCase = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __UpperCamelCase ( self , A_ , A_=0 ) -> List[Any]:
"""simple docstring"""
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
# create init_image
UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert('RGB' ).resize((256, 256) )
# create hint
UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
if str(A_ ).startswith('mps' ):
UpperCamelCase = torch.manual_seed(A_ )
else:
UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __UpperCamelCase ( self ) -> Any:
"""simple docstring"""
UpperCamelCase = 'cpu'
UpperCamelCase = self.get_dummy_components()
UpperCamelCase = self.pipeline_class(**A_ )
UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase = output.images
UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase = np.array(
[0.5498_5034, 0.5550_9365, 0.5256_1504, 0.557_0494, 0.559_3818, 0.526_3979, 0.5028_5643, 0.506_9846, 0.5119_6736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def __UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
UpperCamelCase = init_image.resize((512, 512) )
UpperCamelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
UpperCamelCase = torch.from_numpy(np.array(A_ ) ).float() / 255.0
UpperCamelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
UpperCamelCase = 'A robot, 4k photo'
UpperCamelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa )
UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCamelCase , UpperCamelCase = pipe_prior(
A_ , image=A_ , strength=0.85 , generator=A_ , negative_prompt='' , ).to_tuple()
UpperCamelCase = pipeline(
image=A_ , image_embeds=A_ , negative_image_embeds=A_ , hint=A_ , generator=A_ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='np' , )
UpperCamelCase = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(A_ , A_ )
| 110 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def a ( snake_case__: List[str] ):
'''simple docstring'''
lowercase_ = {}
lowercase_ = tokenizer(example['''content'''] , truncation=False )['''input_ids''']
lowercase_ = len(example['''content'''] ) / len(output['''input_ids'''] )
return output
__a = HfArgumentParser(PretokenizationArguments)
__a = parser.parse_args()
if args.num_workers is None:
__a = multiprocessing.cpu_count()
__a = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__a = time.time()
__a = load_dataset(args.dataset_name, split='train')
print(f"Dataset loaded in {time.time()-t_start:.2f}s")
__a = time.time()
__a = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")
__a = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 30 |
import os
def a ( ):
'''simple docstring'''
lowercase_ = os.path.join(os.path.dirname(__file__ ) , '''num.txt''' )
with open(snake_case__ ) as file_hand:
return str(sum(int(snake_case__ ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 30 | 1 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : Any = ['vqvae']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) -> Any:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , mel=_SCREAMING_SNAKE_CASE , vqvae=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return 50 if isinstance(self.scheduler , _SCREAMING_SNAKE_CASE ) else 1000
@torch.no_grad()
def __call__( self , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
'''simple docstring'''
UpperCAmelCase : List[str] = steps or self.get_default_steps()
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
UpperCAmelCase : Optional[int] = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
UpperCAmelCase : List[str] = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_SCREAMING_SNAKE_CASE , device=self.device , )
UpperCAmelCase : List[str] = noise
UpperCAmelCase : Tuple = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = self.mel.audio_slice_to_image(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : str = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
(input_image.height, input_image.width) )
UpperCAmelCase : str = (input_image / 255) * 2 - 1
UpperCAmelCase : List[str] = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
UpperCAmelCase : List[str] = self.vqvae.encode(torch.unsqueeze(_SCREAMING_SNAKE_CASE , 0 ) ).latent_dist.sample(
generator=_SCREAMING_SNAKE_CASE )[0]
UpperCAmelCase : List[str] = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
UpperCAmelCase : int = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler.timesteps[start_step - 1] )
UpperCAmelCase : Optional[int] = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
UpperCAmelCase : Optional[int] = int(mask_start_secs * pixels_per_second )
UpperCAmelCase : Tuple = int(mask_end_secs * pixels_per_second )
UpperCAmelCase : Dict = self.scheduler.add_noise(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Tuple = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""]
else:
UpperCAmelCase : int = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""]
if isinstance(self.scheduler , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : List[Any] = self.scheduler.step(
model_output=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , sample=_SCREAMING_SNAKE_CASE , eta=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , )["""prev_sample"""]
else:
UpperCAmelCase : Optional[Any] = self.scheduler.step(
model_output=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , sample=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , )["""prev_sample"""]
if mask is not None:
if mask_start > 0:
UpperCAmelCase : Union[str, Any] = mask[:, step, :, :mask_start]
if mask_end > 0:
UpperCAmelCase : int = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
UpperCAmelCase : List[Any] = 1 / self.vqvae.config.scaling_factor * images
UpperCAmelCase : Dict = self.vqvae.decode(_SCREAMING_SNAKE_CASE )["""sample"""]
UpperCAmelCase : Any = (images / 2 + 0.5).clamp(0 , 1 )
UpperCAmelCase : Union[str, Any] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
UpperCAmelCase : int = (images * 255).round().astype("""uint8""" )
UpperCAmelCase : Optional[Any] = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_ , mode="""RGB""" ).convert("""L""" ) for _ in images) )
UpperCAmelCase : List[Any] = [self.mel.image_to_audio(_SCREAMING_SNAKE_CASE ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_SCREAMING_SNAKE_CASE )[:, np.newaxis, :] ) , **ImagePipelineOutput(_SCREAMING_SNAKE_CASE ) )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 50 ) -> np.ndarray:
'''simple docstring'''
assert isinstance(self.scheduler , _SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = np.array(
[np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
UpperCAmelCase : str = (sample / 255) * 2 - 1
UpperCAmelCase : int = torch.Tensor(_SCREAMING_SNAKE_CASE ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
UpperCAmelCase : Any = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
UpperCAmelCase : Tuple = self.scheduler.alphas_cumprod[t]
UpperCAmelCase : int = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
UpperCAmelCase : List[str] = 1 - alpha_prod_t
UpperCAmelCase : Dict = self.unet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["""sample"""]
UpperCAmelCase : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
UpperCAmelCase : List[str] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
UpperCAmelCase : Optional[Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> torch.Tensor:
'''simple docstring'''
UpperCAmelCase : Optional[int] = acos(torch.dot(torch.flatten(_SCREAMING_SNAKE_CASE ) , torch.flatten(_SCREAMING_SNAKE_CASE ) ) / torch.norm(_SCREAMING_SNAKE_CASE ) / torch.norm(_SCREAMING_SNAKE_CASE ) )
return sin((1 - alpha) * theta ) * xa / sin(_SCREAMING_SNAKE_CASE ) + sin(alpha * theta ) * xa / sin(_SCREAMING_SNAKE_CASE )
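# Tiny standalone check of the spherical interpolation helper above (a sketch with plain
# 1-D tensors): at alpha = 0 and alpha = 1 the interpolation reduces to the endpoints.
def _slerp_check(xa, xb, alpha):
    theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)

_e0, _e1 = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
assert torch.allclose(_slerp_check(_e0, _e1, 0.0), _e0, atol=1e-6)
assert torch.allclose(_slerp_check(_e0, _e1, 1.0), _e1, atol=1e-6)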
| 366 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A: int = logging.get_logger(__name__)
A: Any = {"vocab_file": "vocab.txt"}
A: Optional[int] = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
A: Optional[int] = {
"YituTech/conv-bert-base": 5_1_2,
"YituTech/conv-bert-medium-small": 5_1_2,
"YituTech/conv-bert-small": 5_1_2,
}
A: int = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : int = ConvBertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCAmelCase : Dict = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) )
UpperCAmelCase : str = do_lower_case
UpperCAmelCase : Optional[int] = strip_accents
UpperCAmelCase : List[str] = tokenize_chinese_chars
UpperCAmelCase : Dict = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = do_lower_case
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase : Dict = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
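# Usage sketch for the fast tokenizer above (the checkpoint id comes from the vocab map
# in this file):
#
#   tok = ConvBertTokenizerFast.from_pretrained('YituTech/conv-bert-base')
#   enc = tok('first segment', 'second segment')
#   # enc['token_type_ids'] marks the two segments, matching the token-type-id
#   # construction above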
| 76 | 0 |
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger('transformers.models.speecht5')
def a__ ( A_, A_, A_ ):
'''simple docstring'''
hf_model.apply_weight_norm()
__magic_name__ = checkpoint["""input_conv.weight_g"""]
__magic_name__ = checkpoint["""input_conv.weight_v"""]
__magic_name__ = checkpoint["""input_conv.bias"""]
for i in range(len(config.upsample_rates ) ):
__magic_name__ = checkpoint[f'''upsamples.{i}.1.weight_g''']
__magic_name__ = checkpoint[f'''upsamples.{i}.1.weight_v''']
__magic_name__ = checkpoint[f'''upsamples.{i}.1.bias''']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
__magic_name__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_g''']
__magic_name__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.weight_v''']
__magic_name__ = checkpoint[f'''blocks.{i}.convs1.{j}.1.bias''']
__magic_name__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_g''']
__magic_name__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.weight_v''']
__magic_name__ = checkpoint[f'''blocks.{i}.convs2.{j}.1.bias''']
__magic_name__ = checkpoint["""output_conv.1.weight_g"""]
__magic_name__ = checkpoint["""output_conv.1.weight_v"""]
__magic_name__ = checkpoint["""output_conv.1.bias"""]
hf_model.remove_weight_norm()
@torch.no_grad()
def a__ ( A_, A_, A_, A_=None, A_=None, ):
'''simple docstring'''
if config_path is not None:
__magic_name__ = SpeechTaHifiGanConfig.from_pretrained(A_ )
else:
__magic_name__ = SpeechTaHifiGanConfig()
__magic_name__ = SpeechTaHifiGan(A_ )
__magic_name__ = torch.load(A_ )
load_weights(orig_checkpoint["""model"""]["""generator"""], A_, A_ )
__magic_name__ = np.load(A_ )
__magic_name__ = stats[0].reshape(-1 )
__magic_name__ = stats[1].reshape(-1 )
__magic_name__ = torch.from_numpy(A_ ).float()
__magic_name__ = torch.from_numpy(A_ ).float()
model.save_pretrained(A_ )
if repo_id:
print("""Pushing to the hub...""" )
model.push_to_hub(A_ )
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
__lowerCAmelCase : List[Any] = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
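# Invocation sketch (the flags are exactly the argparse definitions above; file paths and
# the hub repo id are placeholders):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path ./generator.ckpt \
#       --stats_path ./stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan \
#       --push_to_hub my-user/speecht5_hifigan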
| 88 |
'''simple docstring'''
def snake_case_ (_a : str , _a : str ):
UpperCAmelCase = len(_a ) + 1
UpperCAmelCase = len(_a ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether the length-i prefix of
# input_string matches the length-j prefix of the given pattern.
# "dp" stands for dynamic programming.
UpperCAmelCase = [[0 for i in range(_a )] for j in range(_a )]
# since a string of zero length matches a pattern of zero length
UpperCAmelCase = 1
# since a pattern of zero length will never match a string of non-zero length
for i in range(1 , _a ):
UpperCAmelCase = 0
# a string of zero length can still match a pattern consisting only of 'x*'
# units, since each '*' may repeat its element zero times
for j in range(1 , _a ):
UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now use a bottom-up approach to fill in all remaining lengths
for i in range(1 , _a ):
for j in range(1 , _a ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
UpperCAmelCase = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
UpperCAmelCase = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
UpperCAmelCase = dp[i - 1][j]
else:
UpperCAmelCase = 0
else:
UpperCAmelCase = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
A ='aab'
A ='c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f"""{input_string} matches the given pattern {pattern}""")
else:
print(f"""{input_string} does not match with the given pattern {pattern}""")
| 34 | 0 |
def snake_case_(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> float:
"""simple docstring"""
_snake_case = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('''All input parameters must be positive''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
_snake_case = 1 - (matter_density + radiation_density + dark_energy)
_snake_case = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
_snake_case = hubble_constant * e_a ** (1 / 2)
return hubble
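# The expression above is the Friedmann equation written with density parameters:
#   H(z) = H0 * sqrt(Omega_r * (1+z)^4 + Omega_m * (1+z)^3 + Omega_k * (1+z)^2 + Omega_Lambda)
# where Omega_k = 1 - (Omega_m + Omega_r + Omega_Lambda). A flat-universe sanity check
# (a sketch): at redshift 0 the bracket sums to 1, so H(0) equals the Hubble constant.
import math

assert math.isclose(
    hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=1 - 0.3 - 1e-4,
        redshift=0,
    ),
    68.3,
)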
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__A = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 278 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__A = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 278 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = StableDiffusionSAGPipeline
UpperCamelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
torch.manual_seed(0 )
lowercase_ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
lowercase_ : Optional[int] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowercase_ , set_alpha_to_one=lowercase_ , )
torch.manual_seed(0 )
lowercase_ : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase_ : Any = CLIPTextModel(lowercase_ )
lowercase_ : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowercase_ : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : List[Any]=0 ):
if str(lowercase_ ).startswith("""mps""" ):
lowercase_ : int = torch.manual_seed(lowercase_ )
else:
lowercase_ : List[str] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase_ : Tuple = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[Any] = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
lowercase_ : Union[str, Any] = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : List[str] = """."""
lowercase_ : Tuple = torch.manual_seed(0 )
lowercase_ : Any = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
lowercase_ : Tuple = output.images
lowercase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ : Any = np.array([0.15_68, 0.17_38, 0.16_95, 0.16_93, 0.15_07, 0.17_05, 0.15_47, 0.17_51, 0.19_49] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
lowercase_ : List[Any] = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : List[str] = """."""
lowercase_ : Optional[Any] = torch.manual_seed(0 )
lowercase_ : Any = sag_pipe(
[prompt] , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
lowercase_ : Optional[Any] = output.images
lowercase_ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase_ : List[Any] = np.array([0.34_59, 0.28_76, 0.25_37, 0.30_02, 0.26_71, 0.21_60, 0.30_26, 0.22_62, 0.23_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
lowercase_ : Tuple = sag_pipe.to(lowercase_ )
sag_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Dict = """."""
lowercase_ : Tuple = torch.manual_seed(0 )
lowercase_ : Optional[Any] = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowercase_ , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
lowercase_ : str = output.images
assert image.shape == (1, 512, 768, 3)
| 239 |
'''simple docstring'''
from itertools import product
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> list[int]:
lowercase_ : List[Any] = sides_number
lowercase_ : Dict = max_face_number * dice_number
lowercase_ : List[str] = [0] * (max_total + 1)
lowercase_ : Union[str, Any] = 1
lowercase_ : Dict = range(UpperCAmelCase__ , max_face_number + 1 )
for dice_numbers in product(UpperCAmelCase__ , repeat=UpperCAmelCase__ ):
lowercase_ : Any = sum(UpperCAmelCase__ )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCamelCase ( ) -> float:
lowercase_ : Optional[Any] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowercase_ : List[str] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowercase_ : Union[str, Any] = 0
lowercase_ : Tuple = 9
lowercase_ : Optional[int] = 4 * 9
lowercase_ : List[Any] = 6
for peter_total in range(UpperCAmelCase__ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowercase_ : str = (4**9) * (6**6)
lowercase_ : List[Any] = peter_wins_count / total_games_number
lowercase_ : Dict = round(UpperCAmelCase__ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f"""{solution() = }""")
| 239 | 1 |
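The style-context column of that row solves Project Euler 205 (Peter's nine four-sided dice against Colin's six six-sided dice). As a quick sanity check — a hypothetical snippet against a de-obfuscated copy of the frequency helper, not part of the dataset — the returned table must count every possible roll exactly once:

from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    # totals_frequencies[t] = number of rolls of dice_number dice summing to t
    totals_frequencies = [0] * (sides_number * dice_number + 1)
    for roll in product(range(1, sides_number + 1), repeat=dice_number):
        totals_frequencies[sum(roll)] += 1
    return totals_frequencies


assert sum(total_frequency_distribution(4, 9)) == 4**9  # all of Peter's rolls
assert sum(total_frequency_distribution(6, 6)) == 6**6  # all of Colin's rolls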
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(_lowerCamelCase ) , _lowerCamelCase )
return number - int(_lowerCamelCase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 300 |
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( VideoMAEImageProcessor):
def __init__( self, *__a, **__a):
'''simple docstring'''
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead.", __a, )
super().__init__(*__a, **__a)
| 300 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
__A = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__A = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def lowerCamelCase_ ( UpperCamelCase__ : List[str] ) -> int:
"""simple docstring"""
__lowerCamelCase = {}
with open(UpperCamelCase__ , 'r' ) as file:
for line_number, line in enumerate(UpperCamelCase__ ):
__lowerCamelCase = line.strip()
if line:
__lowerCamelCase = line.split()
__lowerCamelCase = line_number
__lowerCamelCase = words[0]
__lowerCamelCase = value
return result
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ) -> Tuple:
"""simple docstring"""
for attribute in key.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
__lowerCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
__lowerCamelCase = 'param'
if weight_type is not None and weight_type != "param":
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
elif weight_type is not None and weight_type == "param":
__lowerCamelCase = hf_pointer
for attribute in hf_param_name.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = shape_pointer.shape
# let's reduce dimension
__lowerCamelCase = value[0]
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('.' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
__lowerCamelCase = PARAM_MAPPING[full_name.split('.' )[-1]]
__lowerCamelCase = 'param'
if weight_type is not None and weight_type != "param":
__lowerCamelCase = '.'.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__lowerCamelCase = '.'.join([key, hf_param_name] )
else:
__lowerCamelCase = key
__lowerCamelCase = value if 'lm_head' in full_key else value[0]
__A = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None ) -> Union[str, Any]:
"""simple docstring"""
__lowerCamelCase = False
for key, mapped_key in MAPPING.items():
__lowerCamelCase = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = 'weight_g'
elif "weight_v" in name:
__lowerCamelCase = 'weight_v'
elif "bias" in name:
__lowerCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = 'weight'
else:
__lowerCamelCase = None
if hf_dict is not None:
rename_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return is_used
return is_used
def lowerCamelCase_ ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase = True
else:
__lowerCamelCase = load_wavaveca_layer(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowerCamelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> Any:
"""simple docstring"""
__lowerCamelCase = full_name.split('conv_layers.' )[-1]
__lowerCamelCase = name.split('.' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def lowerCamelCase_ ( UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any=None , UpperCamelCase__ : str=None , UpperCamelCase__ : int=True , UpperCamelCase__ : str=False ) -> str:
"""simple docstring"""
if config_path is not None:
__lowerCamelCase = WavaVecaConfig.from_pretrained(UpperCamelCase__ )
else:
__lowerCamelCase = WavaVecaConfig()
if is_seq_class:
__lowerCamelCase = read_txt_into_dict(UpperCamelCase__ )
__lowerCamelCase = idalabel
__lowerCamelCase = WavaVecaForSequenceClassification(UpperCamelCase__ )
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
feature_extractor.save_pretrained(UpperCamelCase__ )
elif is_finetuned:
if dict_path:
__lowerCamelCase = Dictionary.load(UpperCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCamelCase = target_dict.pad_index
__lowerCamelCase = target_dict.bos_index
__lowerCamelCase = target_dict.eos_index
__lowerCamelCase = len(target_dict.symbols )
__lowerCamelCase = os.path.join(UpperCamelCase__ , 'vocab.json' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCamelCase__ ) )
return
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
__lowerCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCamelCase = 0
__lowerCamelCase = 1
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = WavaVecaCTCTokenizer(
UpperCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCamelCase__ , )
__lowerCamelCase = True if config.feat_extract_norm == 'layer' else False
__lowerCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
__lowerCamelCase = WavaVecaProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
__lowerCamelCase = WavaVecaForCTC(UpperCamelCase__ )
else:
__lowerCamelCase = WavaVecaForPreTraining(UpperCamelCase__ )
if is_finetuned or is_seq_class:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
__lowerCamelCase = argparse.Namespace(task='audio_pretraining' )
__lowerCamelCase = fairseq.tasks.setup_task(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCamelCase__ )
__lowerCamelCase = model[0].eval()
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__A = parser.parse_args()
__A = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 90 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class __lowerCAmelCase (PretrainedConfig ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = """mvp"""
lowerCAmelCase__ : Optional[Any] = ["""past_key_values"""]
lowerCAmelCase__ : List[str] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__(self : Any , UpperCamelCase : Optional[int]=50267 , UpperCamelCase : Tuple=1024 , UpperCamelCase : int=12 , UpperCamelCase : Tuple=4096 , UpperCamelCase : Dict=16 , UpperCamelCase : int=12 , UpperCamelCase : Optional[int]=4096 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : Tuple=0.0 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Union[str, Any]=1024 , UpperCamelCase : Optional[Any]=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : str=0.0 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=1 , UpperCamelCase : int=0 , UpperCamelCase : int=2 , UpperCamelCase : Any=True , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Tuple=False , UpperCamelCase : int=100 , UpperCamelCase : Optional[Any]=800 , **UpperCamelCase : str , ):
'''simple docstring'''
lowercase__ = vocab_size
lowercase__ = max_position_embeddings
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = classifier_dropout
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = use_prompt
lowercase__ = prompt_length
lowercase__ = prompt_mid_dim
super().__init__(
pad_token_id=UpperCamelCase , bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , **UpperCamelCase , )
if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , UpperCamelCase ):
lowercase__ = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'''The config can simply be saved and uploaded again to be fixed.''' )
| 2 | 0 |
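The code column of that row is the fairseq-to-Transformers Wav2Vec2 checkpoint converter. Its central trick is the `*` wildcard in `MAPPING`: one entry such as `"self_attn.k_proj": "encoder.layers.*.attention.k_proj"` covers every encoder layer, with the layer index recovered from the fairseq weight name. A stripped-down illustration of that substitution (hypothetical helper name, not one of the converter's functions):

def expand_wildcard(mapped_key: str, fairseq_name: str, key: str) -> str:
    # take the text before the matched key, e.g. "encoder.layers.11.",
    # and pull out the layer index to substitute for "*"
    layer_index = fairseq_name.split(key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)


print(
    expand_wildcard(
        "encoder.layers.*.attention.k_proj",
        "encoder.layers.11.self_attn.k_proj.weight",
        "self_attn.k_proj",
    )
)  # -> encoder.layers.11.attention.k_proj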
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
_UpperCAmelCase : List[Any] = logging.get_logger("transformers.models.encodec")
_UpperCAmelCase : Tuple = {
"quantizer.vq.layers.*._codebook.inited": "quantizer.layers.*.codebook.inited",
"quantizer.vq.layers.*._codebook.cluster_size": "quantizer.layers.*.codebook.cluster_size",
"quantizer.vq.layers.*._codebook.embed": "quantizer.layers.*.codebook.embed",
"quantizer.vq.layers.*._codebook.embed_avg": "quantizer.layers.*.codebook.embed_avg",
}
_UpperCAmelCase : Any = {
"encoder.model.0.conv.conv": "encoder.layers.0.conv",
"encoder.model.1.block.1.conv.conv": "encoder.layers.1.block.1.conv",
"encoder.model.1.block.3.conv.conv": "encoder.layers.1.block.3.conv",
"encoder.model.1.shortcut.conv.conv": "encoder.layers.1.shortcut.conv",
"encoder.model.3.conv.conv": "encoder.layers.3.conv",
"encoder.model.4.block.1.conv.conv": "encoder.layers.4.block.1.conv",
"encoder.model.4.block.3.conv.conv": "encoder.layers.4.block.3.conv",
"encoder.model.4.shortcut.conv.conv": "encoder.layers.4.shortcut.conv",
"encoder.model.6.conv.conv": "encoder.layers.6.conv",
"encoder.model.7.block.1.conv.conv": "encoder.layers.7.block.1.conv",
"encoder.model.7.block.3.conv.conv": "encoder.layers.7.block.3.conv",
"encoder.model.7.shortcut.conv.conv": "encoder.layers.7.shortcut.conv",
"encoder.model.9.conv.conv": "encoder.layers.9.conv",
"encoder.model.10.block.1.conv.conv": "encoder.layers.10.block.1.conv",
"encoder.model.10.block.3.conv.conv": "encoder.layers.10.block.3.conv",
"encoder.model.10.shortcut.conv.conv": "encoder.layers.10.shortcut.conv",
"encoder.model.12.conv.conv": "encoder.layers.12.conv",
"encoder.model.13.lstm": "encoder.layers.13.lstm",
"encoder.model.15.conv.conv": "encoder.layers.15.conv",
}
_UpperCAmelCase : Any = {
"encoder.model.0.conv.norm": "encoder.layers.0.norm",
"encoder.model.1.block.1.conv.norm": "encoder.layers.1.block.1.norm",
"encoder.model.1.block.3.conv.norm": "encoder.layers.1.block.3.norm",
"encoder.model.1.shortcut.conv.norm": "encoder.layers.1.shortcut.norm",
"encoder.model.3.conv.norm": "encoder.layers.3.norm",
"encoder.model.4.block.1.conv.norm": "encoder.layers.4.block.1.norm",
"encoder.model.4.block.3.conv.norm": "encoder.layers.4.block.3.norm",
"encoder.model.4.shortcut.conv.norm": "encoder.layers.4.shortcut.norm",
"encoder.model.6.conv.norm": "encoder.layers.6.norm",
"encoder.model.7.block.1.conv.norm": "encoder.layers.7.block.1.norm",
"encoder.model.7.block.3.conv.norm": "encoder.layers.7.block.3.norm",
"encoder.model.7.shortcut.conv.norm": "encoder.layers.7.shortcut.norm",
"encoder.model.9.conv.norm": "encoder.layers.9.norm",
"encoder.model.10.block.1.conv.norm": "encoder.layers.10.block.1.norm",
"encoder.model.10.block.3.conv.norm": "encoder.layers.10.block.3.norm",
"encoder.model.10.shortcut.conv.norm": "encoder.layers.10.shortcut.norm",
"encoder.model.12.conv.norm": "encoder.layers.12.norm",
"encoder.model.15.conv.norm": "encoder.layers.15.norm",
}
_UpperCAmelCase : str = {
"decoder.model.0.conv.conv": "decoder.layers.0.conv",
"decoder.model.1.lstm": "decoder.layers.1.lstm",
"decoder.model.3.convtr.convtr": "decoder.layers.3.conv",
"decoder.model.4.block.1.conv.conv": "decoder.layers.4.block.1.conv",
"decoder.model.4.block.3.conv.conv": "decoder.layers.4.block.3.conv",
"decoder.model.4.shortcut.conv.conv": "decoder.layers.4.shortcut.conv",
"decoder.model.6.convtr.convtr": "decoder.layers.6.conv",
"decoder.model.7.block.1.conv.conv": "decoder.layers.7.block.1.conv",
"decoder.model.7.block.3.conv.conv": "decoder.layers.7.block.3.conv",
"decoder.model.7.shortcut.conv.conv": "decoder.layers.7.shortcut.conv",
"decoder.model.9.convtr.convtr": "decoder.layers.9.conv",
"decoder.model.10.block.1.conv.conv": "decoder.layers.10.block.1.conv",
"decoder.model.10.block.3.conv.conv": "decoder.layers.10.block.3.conv",
"decoder.model.10.shortcut.conv.conv": "decoder.layers.10.shortcut.conv",
"decoder.model.12.convtr.convtr": "decoder.layers.12.conv",
"decoder.model.13.block.1.conv.conv": "decoder.layers.13.block.1.conv",
"decoder.model.13.block.3.conv.conv": "decoder.layers.13.block.3.conv",
"decoder.model.13.shortcut.conv.conv": "decoder.layers.13.shortcut.conv",
"decoder.model.15.conv.conv": "decoder.layers.15.conv",
}
_UpperCAmelCase : Optional[Any] = {
"decoder.model.0.conv.norm": "decoder.layers.0.norm",
"decoder.model.3.convtr.norm": "decoder.layers.3.norm",
"decoder.model.4.block.1.conv.norm": "decoder.layers.4.block.1.norm",
"decoder.model.4.block.3.conv.norm": "decoder.layers.4.block.3.norm",
"decoder.model.4.shortcut.conv.norm": "decoder.layers.4.shortcut.norm",
"decoder.model.6.convtr.norm": "decoder.layers.6.norm",
"decoder.model.7.block.1.conv.norm": "decoder.layers.7.block.1.norm",
"decoder.model.7.block.3.conv.norm": "decoder.layers.7.block.3.norm",
"decoder.model.7.shortcut.conv.norm": "decoder.layers.7.shortcut.norm",
"decoder.model.9.convtr.norm": "decoder.layers.9.norm",
"decoder.model.10.block.1.conv.norm": "decoder.layers.10.block.1.norm",
"decoder.model.10.block.3.conv.norm": "decoder.layers.10.block.3.norm",
"decoder.model.10.shortcut.conv.norm": "decoder.layers.10.shortcut.norm",
"decoder.model.12.convtr.norm": "decoder.layers.12.norm",
"decoder.model.13.block.1.conv.norm": "decoder.layers.13.block.1.norm",
"decoder.model.13.block.3.conv.norm": "decoder.layers.13.block.3.norm",
"decoder.model.13.shortcut.conv.norm": "decoder.layers.13.shortcut.norm",
"decoder.model.15.conv.norm": "decoder.layers.15.norm",
}
_UpperCAmelCase : Optional[Any] = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
_UpperCAmelCase : int = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
_UpperCAmelCase : str = []
_UpperCAmelCase : Union[str, Any] = []
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
for attribute in key.split("." ):
lowercase :List[str] = getattr(lowerCamelCase, lowerCamelCase )
if weight_type is not None:
lowercase :Optional[Any] = getattr(lowerCamelCase, lowerCamelCase ).shape
else:
lowercase :Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
lowercase :Union[str, Any] = value
elif weight_type == "weight_g":
lowercase :List[str] = value
elif weight_type == "weight_v":
lowercase :Any = value
elif weight_type == "bias":
lowercase :str = value
elif weight_type == "running_mean":
lowercase :Optional[int] = value
elif weight_type == "running_var":
lowercase :List[str] = value
elif weight_type == "num_batches_tracked":
lowercase :Union[str, Any] = value
elif weight_type == "weight_ih_l0":
lowercase :List[Any] = value
elif weight_type == "weight_hh_l0":
lowercase :Optional[int] = value
elif weight_type == "bias_ih_l0":
lowercase :Dict = value
elif weight_type == "bias_hh_l0":
lowercase :Tuple = value
elif weight_type == "weight_ih_l1":
lowercase :Union[str, Any] = value
elif weight_type == "weight_hh_l1":
lowercase :List[Any] = value
elif weight_type == "bias_ih_l1":
lowercase :Any = value
elif weight_type == "bias_hh_l1":
lowercase :Optional[Any] = value
else:
lowercase :str = value
logger.info(F"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}." )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase ):
for key in ignore_keys:
if key.endswith(".*" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
lowercase , lowercase :int = key.split(".*." )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowercase :Tuple = []
if model_name in ["encodec_24khz", "encodec_32khz"]:
lowercase :Union[str, Any] = MAPPING_24K
elif model_name == "encodec_48khz":
lowercase :List[str] = MAPPING_48K
else:
raise ValueError(F"Unsupported model: {model_name}" )
for name, value in orig_dict.items():
if should_ignore(lowerCamelCase, lowerCamelCase ):
logger.info(F"{name} was ignored" )
continue
lowercase :int = False
for key, mapped_key in MAPPING.items():
if "*" in key:
lowercase , lowercase :Union[str, Any] = key.split(".*." )
if prefix in name and suffix in name:
lowercase :Optional[Any] = suffix
if key in name:
# HACK otherwise .embed gets initialized with .embed_avg too
if key.endswith("embed" ) and name.endswith("embed_avg" ):
continue
lowercase :Tuple = True
if "*" in mapped_key:
lowercase :List[Any] = name.split(lowerCamelCase )[0].split("." )[-2]
lowercase :Optional[int] = mapped_key.replace("*", lowerCamelCase )
if "weight_g" in name:
lowercase :List[str] = "weight_g"
elif "weight_v" in name:
lowercase :int = "weight_v"
elif "weight_ih_l0" in name:
lowercase :Union[str, Any] = "weight_ih_l0"
elif "weight_hh_l0" in name:
lowercase :List[Any] = "weight_hh_l0"
elif "bias_ih_l0" in name:
lowercase :Union[str, Any] = "bias_ih_l0"
elif "bias_hh_l0" in name:
lowercase :int = "bias_hh_l0"
elif "weight_ih_l1" in name:
lowercase :Any = "weight_ih_l1"
elif "weight_hh_l1" in name:
lowercase :List[Any] = "weight_hh_l1"
elif "bias_ih_l1" in name:
lowercase :Tuple = "bias_ih_l1"
elif "bias_hh_l1" in name:
lowercase :int = "bias_hh_l1"
elif "bias" in name:
lowercase :Optional[int] = "bias"
elif "weight" in name:
lowercase :Any = "weight"
elif "running_mean" in name:
lowercase :List[Any] = "running_mean"
elif "running_var" in name:
lowercase :Dict = "running_var"
elif "num_batches_tracked" in name:
lowercase :int = "num_batches_tracked"
else:
lowercase :str = None
set_recursively(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
continue
if not is_used:
unused_weights.append(lowerCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
@torch.no_grad()
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None, ):
if config_path is not None:
lowercase :Tuple = EncodecConfig.from_pretrained(lowerCamelCase )
else:
lowercase :Optional[int] = EncodecConfig()
if model_name == "encodec_24khz":
pass # config is already correct
elif model_name == "encodec_32khz":
lowercase :int = [8, 5, 4, 4]
lowercase :Tuple = [2.2]
lowercase :Union[str, Any] = 64
lowercase :Dict = 32000
lowercase :Dict = 2048
lowercase :List[Any] = False
lowercase :Optional[Any] = False
lowercase :Any = False
elif model_name == "encodec_48khz":
lowercase :Optional[int] = [8, 5, 4, 2]
lowercase :Tuple = [3.0, 6.0, 12.0, 24.0]
lowercase :Union[str, Any] = 48000
lowercase :Optional[Any] = 2
lowercase :Union[str, Any] = False
lowercase :Optional[Any] = "time_group_norm"
lowercase :List[Any] = True
lowercase :Optional[Any] = 1.0
lowercase :List[str] = 0.01
else:
raise ValueError(F"Unknown model name: {model_name}" )
lowercase :int = EncodecModel(lowerCamelCase )
lowercase :Optional[int] = EncodecFeatureExtractor(
feature_size=config.audio_channels, sampling_rate=config.sampling_rate, chunk_length_s=config.chunk_length_s, overlap=config.overlap, )
feature_extractor.save_pretrained(lowerCamelCase )
lowercase :List[str] = torch.load(lowerCamelCase )
if "best_state" in original_checkpoint:
# we might have a training state saved, in which case discard the yaml results and just retain the weights
lowercase :Optional[Any] = original_checkpoint["best_state"]
recursively_load_weights(lowerCamelCase, lowerCamelCase, lowerCamelCase )
model.save_pretrained(lowerCamelCase )
if repo_id:
print("Pushing to the hub..." )
feature_extractor.push_to_hub(lowerCamelCase )
model.push_to_hub(lowerCamelCase )
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
"--model",
default="encodec_24khz",
type=str,
help="The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
_UpperCAmelCase : Any = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 158 |
def UpperCAmelCase__ ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ):
raise TypeError("Input value must be an 'int' type" )
lowercase :int = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 158 | 1 |
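The style-context column of that row (the small shift loop) computes, for a non-negative integer, how many binary digits it has — each `>>= 1` peels off one bit. That makes it equivalent to Python's built-in `int.bit_length()`, which a hypothetical check against a de-obfuscated copy confirms:

def get_highest_set_bit_position(number: int) -> int:  # name assumed; obfuscated above
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position


for n in (0, 1, 2, 255, 1 << 40):
    assert get_highest_set_bit_position(n) == n.bit_length()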
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE__ ( ModelMixin ,ConfigMixin ,ModuleUtilsMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : Dict , lowercase : int , lowercase : int , lowercase : int , lowercase : float , lowercase : int , lowercase : int , lowercase : int , lowercase : int , lowercase : str , lowercase : bool = False , ):
'''simple docstring'''
super().__init__()
_snake_case = nn.Embedding(lowercase , lowercase )
_snake_case = nn.Embedding(lowercase , lowercase )
_snake_case = False
_snake_case = nn.Dropout(p=lowercase )
_snake_case = TaConfig(
vocab_size=lowercase , d_model=lowercase , num_heads=lowercase , d_kv=lowercase , d_ff=lowercase , dropout_rate=lowercase , feed_forward_proj=lowercase , is_decoder=lowercase , is_encoder_decoder=lowercase , )
_snake_case = nn.ModuleList()
for lyr_num in range(lowercase ):
_snake_case = TaBlock(lowercase )
self.encoders.append(lowercase )
_snake_case = TaLayerNorm(lowercase )
_snake_case = nn.Dropout(p=lowercase )
def A ( self : Dict , lowercase : Any , lowercase : str ):
'''simple docstring'''
_snake_case = self.token_embedder(lowercase )
_snake_case = encoder_input_tokens.shape[1]
_snake_case = torch.arange(lowercase , device=encoder_input_tokens.device )
x += self.position_encoding(lowercase )
_snake_case = self.dropout_pre(lowercase )
# inverted the attention mask
_snake_case = encoder_input_tokens.size()
_snake_case = self.get_extended_attention_mask(lowercase , lowercase )
for lyr in self.encoders:
_snake_case = lyr(lowercase , lowercase )[0]
_snake_case = self.layer_norm(lowercase )
return self.dropout_post(lowercase ), encoder_inputs_mask
| 282 |
_lowerCamelCase : int = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : List[str] = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def a_ ( __lowercase : int , __lowercase : int , __lowercase : int ) -> str:
assert len(str(__lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_snake_case = year // 100
_snake_case = (5 * (century % 4) + 2) % 7
_snake_case = year % 100
_snake_case = centurian % 12
_snake_case = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_snake_case = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_snake_case = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 282 | 1 |
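The style-context column of that row is the doomsday algorithm (the obfuscated `a_` is `get_week_day` in TheAlgorithms). With the leap-year condition corrected above, its result for 2020-10-24 can be cross-checked against the standard library — note that this snippet numbers Sunday as 0, whereas `datetime.weekday()` numbers Monday as 0, so the comparison below goes through the day name instead:

import datetime

# Expected: get_week_day(2020, 10, 24) -> 'Saturday'
assert datetime.date(2020, 10, 24).strftime("%A") == "Saturday"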
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase :
@staticmethod
def UpperCAmelCase ( *_lowercase :Tuple , **_lowercase :Union[str, Any] ):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
__lowerCamelCase = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def UpperCAmelCase ( self :List[Any] , _lowercase :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :List[str] ):
'''simple docstring'''
lowercase__ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
lowercase__ = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def UpperCAmelCase ( self :List[Any] , _lowercase :Optional[Any] , _lowercase :int ):
'''simple docstring'''
lowercase__ = object_detector(examples[0] , threshold=0.0 )
lowercase__ = len(_lowercase )
self.assertGreater(_lowercase , 0 )
self.assertEqual(
_lowercase , [
{
"score": ANY(_lowercase ),
"label": ANY(_lowercase ),
"box": {"xmin": ANY(_lowercase ), "ymin": ANY(_lowercase ), "xmax": ANY(_lowercase ), "ymax": ANY(_lowercase )},
}
for i in range(_lowercase )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def UpperCAmelCase ( self :int ):
'''simple docstring'''
pass
@require_torch
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
lowercase__ = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
] , )
lowercase__ = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
]
] , )
@require_torch
@slow
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
lowercase__ = pipeline("zero-shot-object-detection" )
lowercase__ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
] , )
lowercase__ = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
pass
@require_torch
@slow
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = 0.2
lowercase__ = pipeline("zero-shot-object-detection" )
lowercase__ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
] , )
@require_torch
@slow
def UpperCAmelCase ( self :Dict ):
'''simple docstring'''
lowercase__ = 2
lowercase__ = pipeline("zero-shot-object-detection" )
lowercase__ = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=_lowercase , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
] , )
| 201 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
_snake_case = namedtuple("""covid_data""", """cases deaths recovered""")
def _A ( __magic_name__ = "https://www.worldometers.info/coronavirus/" ):
lowercase__ = "//div[@class = \"maincounter-number\"]/span/text()"
return covid_data(*html.fromstring(requests.get(__magic_name__ ).content ).xpath(__magic_name__ ) )
_snake_case = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 201 | 1 |
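The style-context column of that row is a compact scraping pattern: three XPath text matches star-unpacked into a `namedtuple`. The mechanism in isolation, with hypothetical values and no network access:

from collections import namedtuple

covid_data = namedtuple("covid_data", "cases deaths recovered")

# xpath(...) returns a list of text nodes; *-unpacking maps them
# positionally onto the three named fields
stats = covid_data(*["676,609,955", "6,881,955", "649,396,984"])
print(stats.cases, stats.deaths, stats.recovered)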
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCamelCase : int = logging.getLogger(__name__)
def _SCREAMING_SNAKE_CASE () -> List[Any]:
"""simple docstring"""
lowercase__ = argparse.ArgumentParser(
description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
parser.add_argument(
'''--dataset_name''' , type=A , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=A , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=A , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=A , default=1_000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=A , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=A , type=A , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=A , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=A , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
lowercase__ = parser.parse_args()
return args
def _SCREAMING_SNAKE_CASE (A ) -> str:
"""simple docstring"""
def fn(A ):
return tokenizer(examples['''text'''] )
return fn
def _SCREAMING_SNAKE_CASE (A ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = []
for i in range(len(tokenized_data['''input_ids'''] ) ):
lowercase__ = {
'''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ),
'''attention_mask''': tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ),
}
lowercase__ = tf.train.Features(feature=A )
lowercase__ = tf.train.Example(features=A )
lowercase__ = example.SerializeToString()
records.append(A )
return records
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
lowercase__ = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
lowercase__ = min(len(A ) , args.limit )
lowercase__ = dataset.select(range(A ) )
print(f"Limiting the dataset to {args.limit} entries." )
lowercase__ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
lowercase__ = os.path.join(args.output_dir , args.split )
if not os.path.exists(A ):
os.makedirs(A )
else:
lowercase__ = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
lowercase__ = tokenize_function(A )
lowercase__ = dataset.map(A , batched=A , num_proc=4 , remove_columns=['''text'''] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(A ):
# Concatenate all texts.
lowercase__ = {k: sum(examples[k] , [] ) for k in examples.keys()}
lowercase__ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
lowercase__ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
lowercase__ = {
k: [t[i : i + args.max_length] for i in range(0 , A , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
lowercase__ = dataset_tokenized.map(A , batched=A , batch_size=1_000 , num_proc=4 )
lowercase__ = 0
lowercase__ = 0
for shard in range(0 , len(A ) , args.shard_size ):
lowercase__ = grouped_dataset[shard : shard + args.shard_size]
lowercase__ = len(dataset_snapshot['''input_ids'''] )
lowercase__ = os.path.join(A , f"dataset-{shard_count}-{records_containing}.tfrecord" )
lowercase__ = get_serialized_examples(A )
with tf.io.TFRecordWriter(A ) as out_file:
for i in range(len(A ) ):
lowercase__ = serialized_examples[i]
out_file.write(A )
print('''Wrote file {} containing {} records'''.format(A , A ) )
shard_count += 1
total_records += records_containing
with open(f"split-{args.split}-records-count.txt" , '''w''' ) as f:
print(f"Total {args.split} records: {total_records}" , file=A )
if __name__ == "__main__":
lowerCamelCase : List[Any] = parse_args()
main(args)
| 2 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowerCamelCase : Any = re.compile(R'([A-Z]+)([A-Z][a-z])')
lowerCamelCase : str = re.compile(R'([a-z\d])([A-Z])')
lowerCamelCase : Optional[int] = re.compile(R'(?<!_)_(?!_)')
lowerCamelCase : List[Any] = re.compile(R'(_{2,})')
lowerCamelCase : str = R'^\w+(\.\w+)*$'
lowerCamelCase : Dict = R'<>:/\|?*'
def _SCREAMING_SNAKE_CASE (A ) -> Any:
"""simple docstring"""
lowercase__ = _uppercase_uppercase_re.sub(R'''\1_\2''' , A )
lowercase__ = _lowercase_uppercase_re.sub(R'''\1_\2''' , A )
return name.lower()
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
lowercase__ = _single_underscore_re.split(A )
lowercase__ = [_multiple_underscores_re.split(A ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(A ) if n != '''''' )
def _SCREAMING_SNAKE_CASE (A ) -> Tuple:
"""simple docstring"""
if os.path.basename(A ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
return camelcase_to_snakecase(A )
def _SCREAMING_SNAKE_CASE (A , A ) -> Optional[Any]:
"""simple docstring"""
if os.path.basename(A ) != name:
raise ValueError(f"Should be a dataset name, not a path: {name}" )
if not re.match(_split_re , A ):
raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'." )
return f"{filename_prefix_for_name(A )}-{split}"
def _SCREAMING_SNAKE_CASE (A , A , A , A=None ) -> List[str]:
"""simple docstring"""
lowercase__ = filename_prefix_for_split(A , A )
if filetype_suffix:
prefix += f".{filetype_suffix}"
lowercase__ = os.path.join(A , A )
return f"{filepath}*"
def _SCREAMING_SNAKE_CASE (A , A , A , A=None , A=None ) -> Optional[Any]:
"""simple docstring"""
lowercase__ = filename_prefix_for_split(A , A )
lowercase__ = os.path.join(A , A )
if shard_lengths:
lowercase__ = len(A )
lowercase__ = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(A )]
if filetype_suffix:
lowercase__ = [filename + f".{filetype_suffix}" for filename in filenames]
return filenames
else:
lowercase__ = prefix
if filetype_suffix:
filename += f".{filetype_suffix}"
return [filename]
| 2 | 1 |
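The style-context column of that row is the `datasets` naming helpers: the first regex splits an uppercase run from a following capitalized word, the second splits a lowercase letter or digit from a following capital, and the result is lowercased. A hypothetical check of the expected behaviour on a self-contained copy:

import re

_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")


def camelcase_to_snakecase(name: str) -> str:
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


assert camelcase_to_snakecase("SomeDatasetName") == "some_dataset_name"
assert camelcase_to_snakecase("HTMLParser") == "html_parser"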
"""simple docstring"""
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
_lowercase : Dict = len(__UpperCAmelCase )
# We need to create solution object to save path.
_lowercase : int = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
_lowercase : Optional[Any] = run_maze(__UpperCAmelCase , 0 , 0 , __UpperCAmelCase )
if solved:
print("""\n""".join(str(__UpperCAmelCase ) for row in solutions ) )
else:
print("""No solution exists!""" )
return solved
def __SCREAMING_SNAKE_CASE ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
_lowercase : List[Any] = len(__UpperCAmelCase )
# Final check point.
if i == j == (size - 1):
_lowercase : Any = 1
return True
_lowercase : Optional[int] = (not i < 0) and (not j < 0) # Check lower bounds
_lowercase : Union[str, Any] = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
_lowercase : str = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
_lowercase : Optional[int] = 1
# check for directions
if (
run_maze(__UpperCAmelCase , i + 1 , __UpperCAmelCase , __UpperCAmelCase )
or run_maze(__UpperCAmelCase , __UpperCAmelCase , j + 1 , __UpperCAmelCase )
or run_maze(__UpperCAmelCase , i - 1 , __UpperCAmelCase , __UpperCAmelCase )
or run_maze(__UpperCAmelCase , __UpperCAmelCase , j - 1 , __UpperCAmelCase )
):
return True
_lowercase : Optional[Any] = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 336 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCAmelCase: Any = logging.get_logger(__name__)
UpperCAmelCase: List[str] = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class UpperCamelCase ( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = "instructblip_vision_model"
def __init__( self ,UpperCAmelCase_=14_08 ,UpperCAmelCase_=61_44 ,UpperCAmelCase_=39 ,UpperCAmelCase_=16 ,UpperCAmelCase_=2_24 ,UpperCAmelCase_=14 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=1E-6 ,UpperCAmelCase_=0.0 ,UpperCAmelCase_=1E-10 ,UpperCAmelCase_=True ,**UpperCAmelCase_ ,):
super().__init__(**UpperCAmelCase_ )
_lowercase : Optional[Any] = hidden_size
_lowercase : Tuple = intermediate_size
_lowercase : List[Any] = num_hidden_layers
_lowercase : Tuple = num_attention_heads
_lowercase : Optional[Any] = patch_size
_lowercase : Optional[Any] = image_size
_lowercase : Union[str, Any] = initializer_range
_lowercase : Optional[Any] = attention_dropout
_lowercase : List[Any] = layer_norm_eps
_lowercase : Optional[int] = hidden_act
_lowercase : Tuple = qkv_bias
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : List[Any] = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : int = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( PretrainedConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = "instructblip_qformer"
def __init__( self ,UpperCAmelCase_=3_05_22 ,UpperCAmelCase_=7_68 ,UpperCAmelCase_=12 ,UpperCAmelCase_=12 ,UpperCAmelCase_=30_72 ,UpperCAmelCase_="gelu" ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=0.1 ,UpperCAmelCase_=5_12 ,UpperCAmelCase_=0.02 ,UpperCAmelCase_=1E-12 ,UpperCAmelCase_=0 ,UpperCAmelCase_="absolute" ,UpperCAmelCase_=2 ,UpperCAmelCase_=14_08 ,**UpperCAmelCase_ ,):
super().__init__(pad_token_id=UpperCAmelCase_ ,**UpperCAmelCase_ )
_lowercase : List[Any] = vocab_size
_lowercase : List[Any] = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : List[str] = num_attention_heads
_lowercase : Optional[Any] = hidden_act
_lowercase : int = intermediate_size
_lowercase : Union[str, Any] = hidden_dropout_prob
_lowercase : Optional[Any] = attention_probs_dropout_prob
_lowercase : List[Any] = max_position_embeddings
_lowercase : Tuple = initializer_range
_lowercase : Optional[int] = layer_norm_eps
_lowercase : Any = position_embedding_type
_lowercase : Dict = cross_attention_frequency
_lowercase : Optional[Any] = encoder_hidden_size
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,**UpperCAmelCase_ ):
cls._set_token_in_kwargs(UpperCAmelCase_ )
_lowercase , _lowercase : Dict = cls.get_config_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_lowercase : str = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase_ ,**UpperCAmelCase_ )
class UpperCamelCase ( snake_case ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = "instructblip"
SCREAMING_SNAKE_CASE_ : List[str] = True
def __init__( self ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=None ,UpperCAmelCase_=32 ,**UpperCAmelCase_ ):
super().__init__(**UpperCAmelCase_ )
if vision_config is None:
_lowercase : str = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_lowercase : Any = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_lowercase : Optional[int] = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_lowercase : int = InstructBlipVisionConfig(**UpperCAmelCase_ )
_lowercase : Optional[int] = InstructBlipQFormerConfig(**UpperCAmelCase_ )
_lowercase : Dict = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_lowercase : str = CONFIG_MAPPING[text_model_type](**UpperCAmelCase_ )
_lowercase : str = self.text_config.tie_word_embeddings
_lowercase : Union[str, Any] = self.text_config.is_encoder_decoder
_lowercase : List[str] = num_query_tokens
_lowercase : List[str] = self.vision_config.hidden_size
_lowercase : Dict = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_lowercase : Union[str, Any] = 1.0
_lowercase : Dict = 0.02
@classmethod
def lowerCamelCase__ ( cls ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ ,**UpperCAmelCase_ ,):
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**UpperCAmelCase_ ,)
def lowerCamelCase__ ( self ):
_lowercase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_lowercase : int = self.vision_config.to_dict()
_lowercase : Any = self.qformer_config.to_dict()
_lowercase : Any = self.text_config.to_dict()
_lowercase : Optional[int] = self.__class__.model_type
return output
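# Hedged usage sketch for the composite-config pattern above. The class names
# below follow the usual HF convention (InstructBlipConfig etc.) and are an
# assumption, not taken from this file:
#
#     config = InstructBlipConfig(
#         vision_config={}, qformer_config={}, text_config={"model_type": "opt"}
#     )
#     nested = config.to_dict()  # nested vision/qformer/text dicts plus model_type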
| 336 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
UpperCAmelCase = logging.getLogger(__name__)
def lowercase ( a__ : Optional[int] , a__ : int ) -> Optional[Any]:
if os.path.exists(_A ):
if os.path.exists(os.path.join(_A , '''config.json''' ) ) and os.path.isfile(
os.path.join(_A , '''config.json''' ) ):
os.remove(os.path.join(_A , '''config.json''' ) )
if os.path.exists(os.path.join(_A , '''pytorch_model.bin''' ) ) and os.path.isfile(
os.path.join(_A , '''pytorch_model.bin''' ) ):
os.remove(os.path.join(_A , '''pytorch_model.bin''' ) )
else:
os.makedirs(_A )
model.save_pretrained(_A )
def lowercase ( a__ : List[str] , a__ : Union[str, Any]=False ) -> Union[str, Any]:
_UpperCamelCase = 2
if unlogit:
_UpperCamelCase = torch.pow(_A , _A )
_UpperCamelCase = p * torch.log(_A )
_UpperCamelCase = 0
return -plogp.sum(dim=-1 )
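# Self-contained sketch of the attention-entropy idea implemented above (the
# tensor values are illustrative, not data from this script): for a
# distribution p over attended positions, H(p) = -(p * log p).sum(), so a
# uniform head scores log(n) and a sharply peaked head scores near zero.
def _demo_attention_entropy():
    h = lambda p: -(p * torch.log(p)).sum(dim=-1)
    p_uniform = torch.full((1, 4), 0.25)
    p_peaked = torch.tensor([[0.97, 0.01, 0.01, 0.01]])
    assert bool((h(p_uniform) > h(p_peaked)).all())  # uniform attention has higher entropy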
def lowercase ( a__ : int ) -> int:
logger.info('''lv, h >\t''' + '''\t'''.join(F'''{x + 1}''' for x in range(len(_A ) ) ) )
for row in range(len(_A ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '''\t'''.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def lowercase ( a__ : Any , a__ : str , a__ : int , a__ : int=True , a__ : Tuple=True , a__ : Optional[int]=None , a__ : Optional[Any]=False ) -> Any:
_UpperCamelCase , _UpperCamelCase = model.config.num_hidden_layers, model.config.num_attention_heads
_UpperCamelCase = torch.zeros(_A , _A ).to(args.device )
_UpperCamelCase = torch.zeros(_A , _A ).to(args.device )
if head_mask is None:
_UpperCamelCase = torch.ones(_A , _A ).to(args.device )
head_mask.requires_grad_(requires_grad=_A )
    # If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_UpperCamelCase = None
_UpperCamelCase = 0.0
_UpperCamelCase = 0.0
for step, inputs in enumerate(tqdm(_A , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ):
_UpperCamelCase = tuple(t.to(args.device ) for t in inputs )
((_UpperCamelCase ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_UpperCamelCase = model(_A , labels=_A , head_mask=_A )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_A ):
_UpperCamelCase = entropy(attn.detach() , _A )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_A ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_UpperCamelCase = 2
_UpperCamelCase = torch.pow(torch.pow(_A , _A ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
_UpperCamelCase = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('''Attention entropies''' )
print_ad_tensor(_A )
if compute_importance:
logger.info('''Head importance scores''' )
print_ad_tensor(_A )
logger.info('''Head ranked by importance scores''' )
_UpperCamelCase = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_UpperCamelCase = torch.arange(
head_importance.numel() , device=args.device )
_UpperCamelCase = head_ranks.view_as(_A )
print_ad_tensor(_A )
return attn_entropy, head_importance, total_loss
def lowercase ( a__ : Union[str, Any] , a__ : Dict , a__ : Tuple ) -> Union[str, Any]:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = compute_heads_importance(_A , _A , _A , compute_entropy=_A )
    _UpperCamelCase = 1 / loss # instead of downstream score, use the LM loss
logger.info('''Pruning: original score: %f, threshold: %f''' , _A , original_score * args.masking_threshold )
_UpperCamelCase = torch.ones_like(_A )
_UpperCamelCase = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_UpperCamelCase = original_score
while current_score >= original_score * args.masking_threshold:
_UpperCamelCase = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_UpperCamelCase = float('''Inf''' )
_UpperCamelCase = head_importance.view(-1 ).sort()[1]
if len(_A ) <= num_to_mask:
print('''BREAK BY num_to_mask''' )
break
# mask heads
_UpperCamelCase = current_heads_to_mask[:num_to_mask]
logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) )
_UpperCamelCase = new_head_mask.view(-1 )
_UpperCamelCase = 0.0
_UpperCamelCase = new_head_mask.view_as(_A )
_UpperCamelCase = new_head_mask.clone().detach()
print_ad_tensor(_A )
# Compute metric and head importance again
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = compute_heads_importance(
_A , _A , _A , compute_entropy=_A , head_mask=_A )
_UpperCamelCase = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percent)''' , _A , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('''Final head mask''' )
print_ad_tensor(_A )
np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowercase ( a__ : List[str] , a__ : Union[str, Any] , a__ : List[str] , a__ : int ) -> List[str]:
_UpperCamelCase = datetime.now()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = compute_heads_importance(
_A , _A , _A , compute_entropy=_A , compute_importance=_A , head_mask=_A )
_UpperCamelCase = 1 / loss
_UpperCamelCase = datetime.now() - before_time
_UpperCamelCase = sum(p.numel() for p in model.parameters() )
_UpperCamelCase = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_A ) )
}
for k, v in heads_to_prune.items():
if isinstance(_A , _A ):
_UpperCamelCase = [
v,
]
assert sum(len(_A ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_A )
_UpperCamelCase = sum(p.numel() for p in model.parameters() )
_UpperCamelCase = datetime.now()
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = compute_heads_importance(
_A , _A , _A , compute_entropy=_A , compute_importance=_A , head_mask=_A , actually_pruned=_A , )
_UpperCamelCase = 1 / loss
_UpperCamelCase = datetime.now() - before_time
    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)''' , _A , _A , pruned_num_params / original_num_params * 100 , )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''' , _A , _A )
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percent''' , original_time / new_time * 100 )
save_model(_A , args.output_dir )
def lowercase ( ) -> Any:
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--data_dir''' , default=_A , type=_A , required=_A , help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=_A , type=_A , required=_A , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--output_dir''' , default=_A , type=_A , required=_A , help='''The output directory where the model predictions and checkpoints will be written.''' , )
# Other parameters
parser.add_argument(
'''--config_name''' , default='''''' , type=_A , help='''Pretrained config name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--tokenizer_name''' , default='''''' , type=_A , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , )
parser.add_argument(
'''--cache_dir''' , default=_A , type=_A , help='''Where do you want to store the pre-trained models downloaded from s3''' , )
parser.add_argument(
'''--data_subset''' , type=_A , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' )
parser.add_argument(
'''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
parser.add_argument(
'''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' )
parser.add_argument(
'''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , )
parser.add_argument(
'''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' )
parser.add_argument(
'''--masking_threshold''' , default=0.9 , type=_A , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , )
parser.add_argument(
'''--masking_amount''' , default=0.1 , type=_A , help='''Amount to heads to masking at each masking step.''' )
parser.add_argument('''--metric_name''' , default='''acc''' , type=_A , help='''Metric to use for head masking.''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_A , help=(
'''The maximum total input sequence length after WordPiece tokenization. \n'''
'''Sequences longer than this will be truncated, sequences shorter padded.'''
) , )
parser.add_argument('''--batch_size''' , default=1 , type=_A , help='''Batch size.''' )
parser.add_argument('''--seed''' , type=_A , default=42 )
parser.add_argument('''--local_rank''' , type=_A , default=-1 , help='''local_rank for distributed training on gpus''' )
parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' )
parser.add_argument('''--server_ip''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_A , default='''''' , help='''Can be used for distant debugging.''' )
_UpperCamelCase = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_A )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_UpperCamelCase = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' )
_UpperCamelCase = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_UpperCamelCase = torch.device('''cuda''' , args.local_rank )
_UpperCamelCase = 1
torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_UpperCamelCase = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_UpperCamelCase = nn.parallel.DistributedDataParallel(
_A , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_A )
elif args.n_gpu > 1:
_UpperCamelCase = nn.DataParallel(_A )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_A )
torch.save(_A , os.path.join(args.output_dir , '''run_args.bin''' ) )
logger.info('''Training/evaluation parameters %s''' , _A )
# Prepare dataset
_UpperCamelCase = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_UpperCamelCase = (torch.from_numpy(_A ),)
_UpperCamelCase = TensorDataset(*_A )
_UpperCamelCase = RandomSampler(_A )
_UpperCamelCase = DataLoader(_A , sampler=_A , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_A , _A , _A )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_UpperCamelCase = mask_heads(_A , _A , _A )
prune_heads(_A , _A , _A , _A )
if __name__ == "__main__":
main()
| 256 |
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
lowercase__ : Tuple = re.compile(R"\b(a|an|the)\b", re.UNICODE)
lowercase__ : Optional[int] = None
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0." )
parser.add_argument("data_file" , metavar="data.json" , help="Input data JSON file." )
parser.add_argument("pred_file" , metavar="pred.json" , help="Model predictions." )
parser.add_argument(
"--out-file" , "-o" , metavar="eval.json" , help="Write accuracy metrics to file (default is stdout)." )
parser.add_argument(
"--na-prob-file" , "-n" , metavar="na_prob.json" , help="Model estimates of probability of no answer." )
parser.add_argument(
"--na-prob-thresh" , "-t" , type=_A , default=1.0 , help="Predict \"\" if no-answer probability exceeds this (default = 1.0)." , )
parser.add_argument(
"--out-image-dir" , "-p" , metavar="out_images" , default=_A , help="Save precision-recall curves to directory." )
parser.add_argument("--verbose" , "-v" , action="store_true" )
if len(sys.argv ) == 1:
parser.print_help()
sys.exit(1 )
return parser.parse_args()
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case_ = bool(qa["answers"]["text"] )
return qid_to_has_ans
def lowerCamelCase__ ( _A ):
'''simple docstring'''
def remove_articles(_A ):
return ARTICLES_REGEX.sub(" " , _A )
def white_space_fix(_A ):
return " ".join(text.split() )
def remove_punc(_A ):
snake_case_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_A ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_A ) ) ) )
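# Worked example restating the four normalisation steps above in one place
# (self-contained; the sample string is made up): lowercase, strip punctuation,
# drop articles, collapse whitespace.
def _demo_normalize_answer(text="The  Cat, sat!"):
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)
    return " ".join(text.split())  # -> "cat sat"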
def lowerCamelCase__ ( _A ):
'''simple docstring'''
if not s:
return []
return normalize_answer(_A ).split()
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
return int(normalize_answer(_A ) == normalize_answer(_A ) )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
snake_case_ = get_tokens(_A )
snake_case_ = get_tokens(_A )
snake_case_ = collections.Counter(_A ) & collections.Counter(_A )
snake_case_ = sum(common.values() )
if len(_A ) == 0 or len(_A ) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks )
if num_same == 0:
return 0
snake_case_ = 1.0 * num_same / len(_A )
snake_case_ = 1.0 * num_same / len(_A )
snake_case_ = (2 * precision * recall) / (precision + recall)
return fa
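# Worked example of the token-level F1 above (self-contained; the token lists
# are made up): 2 shared tokens give precision 2/2 and recall 2/4, so
# F1 = 2 * 1.0 * 0.5 / 1.5 = 2/3.
def _demo_token_f1(gold_toks=("cat", "sat", "on", "mat"), pred_toks=("cat", "sat")):
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    precision = num_same / len(pred_toks)
    recall = num_same / len(gold_toks)
    return 2 * precision * recall / (precision + recall)  # ~0.667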
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
snake_case_ = {}
snake_case_ = {}
for article in dataset:
for p in article["paragraphs"]:
for qa in p["qas"]:
snake_case_ = qa["id"]
snake_case_ = [t for t in qa["answers"]["text"] if normalize_answer(_A )]
if not gold_answers:
                    # For unanswerable questions, the only correct answer is the empty string
snake_case_ = [""]
if qid not in preds:
print(f"Missing prediction for {qid}" )
continue
snake_case_ = preds[qid]
# Take max over all gold answers
snake_case_ = max(compute_exact(_A , _A ) for a in gold_answers )
snake_case_ = max(compute_fa(_A , _A ) for a in gold_answers )
return exact_scores, fa_scores
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = {}
for qid, s in scores.items():
snake_case_ = na_probs[qid] > na_prob_thresh
if pred_na:
snake_case_ = float(not qid_to_has_ans[qid] )
else:
snake_case_ = s
return new_scores
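# Hedged sketch of the thresholding above: when the model's no-answer
# probability exceeds the threshold, the score is replaced by whether
# abstaining was correct (all values below are made up).
def _demo_no_ans_threshold(score=0.8, na_prob=0.95, thresh=0.9, has_answer=True):
    if na_prob > thresh:
        return float(not has_answer)  # abstained: credit only if gold has no answer
    return score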
def lowerCamelCase__ ( _A , _A , _A=None ):
'''simple docstring'''
if not qid_list:
snake_case_ = len(_A )
return collections.OrderedDict(
[
("exact", 1_00.0 * sum(exact_scores.values() ) / total),
("f1", 1_00.0 * sum(fa_scores.values() ) / total),
("total", total),
] )
else:
snake_case_ = len(_A )
return collections.OrderedDict(
[
("exact", 1_00.0 * sum(exact_scores[k] for k in qid_list ) / total),
("f1", 1_00.0 * sum(fa_scores[k] for k in qid_list ) / total),
("total", total),
] )
def lowerCamelCase__ ( _A , _A , _A ):
'''simple docstring'''
for k in new_eval:
snake_case_ = new_eval[k]
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
plt.step(_A , _A , color="b" , alpha=0.2 , where="post" )
plt.fill_between(_A , _A , step="post" , alpha=0.2 , color="b" )
plt.xlabel("Recall" )
plt.ylabel("Precision" )
plt.xlim([0.0, 1.05] )
plt.ylim([0.0, 1.05] )
plt.title(_A )
plt.savefig(_A )
plt.clf()
def lowerCamelCase__ ( _A , _A , _A , _A , _A=None , _A=None ):
'''simple docstring'''
snake_case_ = sorted(_A , key=lambda _A : na_probs[k] )
snake_case_ = 0.0
snake_case_ = 1.0
snake_case_ = 0.0
snake_case_ = [1.0]
snake_case_ = [0.0]
snake_case_ = 0.0
for i, qid in enumerate(_A ):
if qid_to_has_ans[qid]:
true_pos += scores[qid]
snake_case_ = true_pos / float(i + 1 )
snake_case_ = true_pos / float(_A )
if i == len(_A ) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
# i.e., if we can put a threshold after this point
avg_prec += cur_p * (cur_r - recalls[-1])
precisions.append(_A )
recalls.append(_A )
if out_image:
plot_pr_curve(_A , _A , _A , _A )
return {"ap": 1_00.0 * avg_prec}
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
if out_image_dir and not os.path.exists(_A ):
os.makedirs(_A )
snake_case_ = sum(1 for v in qid_to_has_ans.values() if v )
if num_true_pos == 0:
return
snake_case_ = make_precision_recall_eval(
_A , _A , _A , _A , out_image=os.path.join(_A , "pr_exact.png" ) , title="Precision-Recall curve for Exact Match score" , )
snake_case_ = make_precision_recall_eval(
_A , _A , _A , _A , out_image=os.path.join(_A , "pr_f1.png" ) , title="Precision-Recall curve for F1 score" , )
snake_case_ = {k: float(_A ) for k, v in qid_to_has_ans.items()}
snake_case_ = make_precision_recall_eval(
_A , _A , _A , _A , out_image=os.path.join(_A , "pr_oracle.png" ) , title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)" , )
merge_eval(_A , _A , "pr_exact" )
merge_eval(_A , _A , "pr_f1" )
merge_eval(_A , _A , "pr_oracle" )
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
if not qid_list:
return
snake_case_ = [na_probs[k] for k in qid_list]
snake_case_ = np.ones_like(_A ) / float(len(_A ) )
plt.hist(_A , weights=_A , bins=20 , range=(0.0, 1.0) )
plt.xlabel("Model probability of no-answer" )
plt.ylabel("Proportion of dataset" )
plt.title(f"Histogram of no-answer probability: {name}" )
plt.savefig(os.path.join(_A , f"na_prob_hist_{name}.png" ) )
plt.clf()
def lowerCamelCase__ ( _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k] )
snake_case_ = num_no_ans
snake_case_ = cur_score
snake_case_ = 0.0
snake_case_ = sorted(_A , key=lambda _A : na_probs[k] )
for i, qid in enumerate(_A ):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
snake_case_ = scores[qid]
else:
if preds[qid]:
snake_case_ = -1
else:
snake_case_ = 0
cur_score += diff
if cur_score > best_score:
snake_case_ = cur_score
snake_case_ = na_probs[qid]
return 1_00.0 * best_score / len(_A ), best_thresh
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A ):
'''simple docstring'''
snake_case_ , snake_case_ = find_best_thresh(_A , _A , _A , _A )
snake_case_ , snake_case_ = find_best_thresh(_A , _A , _A , _A )
snake_case_ = best_exact
snake_case_ = exact_thresh
snake_case_ = best_fa
snake_case_ = fa_thresh
def lowerCamelCase__ ( ):
'''simple docstring'''
with open(OPTS.data_file ) as f:
snake_case_ = json.load(_A )
snake_case_ = dataset_json["data"]
with open(OPTS.pred_file ) as f:
snake_case_ = json.load(_A )
if OPTS.na_prob_file:
with open(OPTS.na_prob_file ) as f:
snake_case_ = json.load(_A )
else:
snake_case_ = {k: 0.0 for k in preds}
snake_case_ = make_qid_to_has_ans(_A ) # maps qid to True/False
snake_case_ = [k for k, v in qid_to_has_ans.items() if v]
snake_case_ = [k for k, v in qid_to_has_ans.items() if not v]
snake_case_ , snake_case_ = get_raw_scores(_A , _A )
snake_case_ = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh )
snake_case_ = apply_no_ans_threshold(_A , _A , _A , OPTS.na_prob_thresh )
snake_case_ = make_eval_dict(_A , _A )
if has_ans_qids:
snake_case_ = make_eval_dict(_A , _A , qid_list=_A )
merge_eval(_A , _A , "HasAns" )
if no_ans_qids:
snake_case_ = make_eval_dict(_A , _A , qid_list=_A )
merge_eval(_A , _A , "NoAns" )
if OPTS.na_prob_file:
find_all_best_thresh(_A , _A , _A , _A , _A , _A )
if OPTS.na_prob_file and OPTS.out_image_dir:
run_precision_recall_analysis(_A , _A , _A , _A , _A , OPTS.out_image_dir )
histogram_na_prob(_A , _A , OPTS.out_image_dir , "hasAns" )
histogram_na_prob(_A , _A , OPTS.out_image_dir , "noAns" )
if OPTS.out_file:
with open(OPTS.out_file , "w" ) as f:
json.dump(_A , _A )
else:
print(json.dumps(_A , indent=2 ) )
if __name__ == "__main__":
lowercase__ : Union[str, Any] = parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
main()
| 187 | 0 |
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse('0.11.0' ).release:
# old versions of hfh don't url-encode the file path
__lowerCAmelCase : List[str] = quote(_UpperCamelCase )
    return hfh.hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' , revision=_UpperCamelCase )
| 182 |
"""simple docstring"""
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = size
__lowerCAmelCase : str = [0] * size
__lowerCAmelCase : Any = [0] * size
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
return index | (index + 1)
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
return (index & (index + 1)) - 1
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = value
while index < self.size:
__lowerCAmelCase : Dict = self.get_prev(_SCREAMING_SNAKE_CASE ) + 1
if current_left_border == index:
__lowerCAmelCase : Any = value
else:
__lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.get_next(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
right -= 1 # Because of right is exclusive
__lowerCAmelCase : Optional[int] = 0
while left <= right:
__lowerCAmelCase : Optional[int] = self.get_prev(_SCREAMING_SNAKE_CASE )
if left <= current_left:
__lowerCAmelCase : Optional[Any] = max(_SCREAMING_SNAKE_CASE , self.tree[right] )
__lowerCAmelCase : Optional[Any] = current_left
else:
__lowerCAmelCase : List[str] = max(_SCREAMING_SNAKE_CASE , self.arr[right] )
right -= 1
return result
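# Illustrative check of the two index helpers above (self-contained): in this
# Fenwick layout, get_next jumps to the next node whose range covers the index,
# and get_prev steps just below the start of the current node's range.
def _demo_fenwick_index_maps():
    get_next = lambda i: i | (i + 1)
    get_prev = lambda i: (i & (i + 1)) - 1
    assert [get_next(i) for i in range(4)] == [1, 3, 3, 7]
    assert [get_prev(i) for i in range(4)] == [-1, -1, 1, -1]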
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 182 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
def __init__( self : Any , *UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=None , **UpperCamelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
__magic_name__ = eval_examples
__magic_name__ = post_process_function
def _lowercase ( self : Optional[int] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : str=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str = "eval" ) -> Tuple:
"""simple docstring"""
__magic_name__ = self.eval_dataset if eval_dataset is None else eval_dataset
__magic_name__ = self.get_eval_dataloader(UpperCamelCase__ )
__magic_name__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ = self.compute_metrics
__magic_name__ = None
__magic_name__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__magic_name__ = time.time()
try:
__magic_name__ = eval_loop(
UpperCamelCase__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , )
finally:
__magic_name__ = compute_metrics
__magic_name__ = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__magic_name__ = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions )
__magic_name__ = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ = metrics.pop(UpperCamelCase__ )
metrics.update(output.metrics )
else:
__magic_name__ = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCamelCase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__magic_name__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase__ )
return metrics
def _lowercase ( self : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : str = "test" ) -> List[str]:
"""simple docstring"""
__magic_name__ = self.get_test_dataloader(UpperCamelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
__magic_name__ = self.compute_metrics
__magic_name__ = None
__magic_name__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__magic_name__ = time.time()
try:
__magic_name__ = eval_loop(
UpperCamelCase__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase__ , metric_key_prefix=UpperCamelCase__ , )
finally:
__magic_name__ = compute_metrics
__magic_name__ = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
UpperCamelCase__ , UpperCamelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__magic_name__ = self.post_process_function(UpperCamelCase__ , UpperCamelCase__ , output.predictions , """predict""" )
__magic_name__ = self.compute_metrics(UpperCamelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__magic_name__ = metrics.pop(UpperCamelCase__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase__ )
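# Hedged usage sketch for the subclass above, assuming the usual name
# QuestionAnsweringTrainer (an assumption, not taken from this file): it is
# built like a plain Trainer plus the two QA-specific hooks, and evaluate()
# then routes raw predictions through the post-processing function.
#
#     trainer = QuestionAnsweringTrainer(
#         model=model,
#         args=training_args,
#         eval_examples=raw_eval_examples,
#         post_process_function=postprocess_qa_predictions,
#     )
#     metrics = trainer.evaluate()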
| 88 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Tuple = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[Any] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
__A : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 154 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
__snake_case = TypeVar("""T""")
class lowercase__ ( Generic[T] ):
A__ : deque[T] # Cache store of keys
A__ : set[T] # References of the keys in cache
A__ : int =1_0 # Maximum capacity of cache
def __init__( self : Optional[int] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE__ = deque()
SCREAMING_SNAKE_CASE__ = set()
if not n:
SCREAMING_SNAKE_CASE__ = sys.maxsize
elif n < 0:
raise ValueError('n should be an integer greater than 0.' )
else:
SCREAMING_SNAKE_CASE__ = n
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : T ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
SCREAMING_SNAKE_CASE__ = self.dq_store.pop()
self.key_reference.remove(UpperCAmelCase_ )
else:
self.dq_store.remove(UpperCAmelCase_ )
self.dq_store.appendleft(UpperCAmelCase_ )
self.key_reference.add(UpperCAmelCase_ )
def A_ ( self : Dict ):
for k in self.dq_store:
print(UpperCAmelCase_ )
def __repr__( self : Any ):
return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 169 |
import doctest
from collections import deque
import numpy as np
class lowercase__ :
def __init__( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ = [2, 1, 2, -1]
SCREAMING_SNAKE_CASE__ = [1, 2, 3, 4]
def A_ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ = len(self.first_signal )
SCREAMING_SNAKE_CASE__ = len(self.second_signal )
SCREAMING_SNAKE_CASE__ = max(UpperCAmelCase_ , UpperCAmelCase_ )
# create a zero matrix of max_length x max_length
SCREAMING_SNAKE_CASE__ = [[0] * max_length for i in range(UpperCAmelCase_ )]
        # pads the shorter signal with zeros so both signals have the same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = deque(self.second_signal )
rotated_signal.rotate(UpperCAmelCase_ )
for j, item in enumerate(UpperCAmelCase_ ):
matrix[i][j] += item
# multiply the matrix with the first signal
SCREAMING_SNAKE_CASE__ = np.matmul(np.transpose(UpperCAmelCase_ ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(UpperCAmelCase_ , 2 ) for i in final_signal]
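# Worked check of the circular convolution above (self-contained; the expected
# output was computed by hand from y[n] = sum_k x[k] * h[(n - k) % 4]):
def _demo_circular_convolution(x=(2, 1, 2, -1), h=(1, 2, 3, 4)):
    n = len(x)
    y = [sum(x[k] * h[(i - k) % n] for k in range(n)) for i in range(n)]
    assert y == [10, 10, 6, 14]
    return y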
if __name__ == "__main__":
doctest.testmod()
| 169 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = "bert-generation"
def __init__( self : str , A : str=50358 , A : int=1024 , A : Optional[Any]=24 , A : Optional[int]=16 , A : str=4096 , A : Tuple="gelu" , A : str=0.1 , A : Dict=0.1 , A : Tuple=512 , A : Tuple=0.02 , A : Optional[int]=1E-12 , A : Union[str, Any]=0 , A : Any=2 , A : Dict=1 , A : Tuple="absolute" , A : List[Any]=True , **A : List[Any] , ):
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
_UpperCAmelCase : Optional[int] = vocab_size
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : int = num_hidden_layers
_UpperCAmelCase : List[str] = num_attention_heads
_UpperCAmelCase : Optional[Any] = hidden_act
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Any = attention_probs_dropout_prob
_UpperCAmelCase : List[str] = max_position_embeddings
_UpperCAmelCase : str = initializer_range
_UpperCAmelCase : List[Any] = layer_norm_eps
_UpperCAmelCase : Optional[int] = position_embedding_type
_UpperCAmelCase : int = use_cache
| 31 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _snake_case ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[Any] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : List[str] = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = TextDatasetReader(lowerCAmelCase , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase , split=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _snake_case ( lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
if issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = text_path
elif issubclass(lowerCAmelCase , lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [text_path]
SCREAMING_SNAKE_CASE_ : int = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Optional[int] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : List[str] = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_dataset(lowerCAmelCase , lowerCAmelCase )
def _snake_case ( lowerCAmelCase : Dict , lowerCAmelCase : Dict , lowerCAmelCase : List[str]=("train",) ):
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
for split in splits:
SCREAMING_SNAKE_CASE_ : int = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _snake_case ( lowerCAmelCase : List[str] , lowerCAmelCase : str , lowerCAmelCase : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
SCREAMING_SNAKE_CASE_ : List[Any] = TextDatasetReader({"train": text_path} , cache_dir=lowerCAmelCase , keep_in_memory=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = tmp_path / "cache"
    # the single "text" column defaults to string dtype
SCREAMING_SNAKE_CASE_ : Tuple = {"text": "string"}
SCREAMING_SNAKE_CASE_ : Any = features.copy() if features else default_expected_features
SCREAMING_SNAKE_CASE_ : Dict = (
Features({feature: Value(lowerCAmelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader({"train": text_path} , features=lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _snake_case ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any , lowerCAmelCase : Dict ):
"""simple docstring"""
if split:
SCREAMING_SNAKE_CASE_ : Optional[int] = {split: text_path}
else:
SCREAMING_SNAKE_CASE_ : List[Any] = "train"
SCREAMING_SNAKE_CASE_ : Tuple = {"train": text_path, "test": text_path}
SCREAMING_SNAKE_CASE_ : Any = tmp_path / "cache"
SCREAMING_SNAKE_CASE_ : List[str] = {"text": "string"}
SCREAMING_SNAKE_CASE_ : str = TextDatasetReader(lowerCAmelCase , cache_dir=lowerCAmelCase ).read()
_check_text_datasetdict(lowerCAmelCase , lowerCAmelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 18 | 0 |
"""simple docstring"""
import requests
__A = "" # <-- Put your OpenWeatherMap appid here!
__A = "https://api.openweathermap.org/data/2.5/"
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = "Chicago" , __UpperCAmelCase = APPID ) -> dict:
return requests.get(URL_BASE + '''weather''' , params=locals() ).json()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = "Kolkata, India" , __UpperCAmelCase = APPID ) -> dict:
return requests.get(URL_BASE + '''forecast''' , params=locals() ).json()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase = 5_5.6_8 , __UpperCAmelCase = 1_2.5_7 , __UpperCAmelCase = APPID ) -> dict:
return requests.get(URL_BASE + '''onecall''' , params=locals() ).json()
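# Note on the params=locals() idiom above (a sketch, not OpenWeatherMap API
# documentation): requests serialises the helper's own arguments into the
# query string, which is what each wrapper relies on.
def _demo_locals_as_params(location="Chicago", appid="MY_APPID"):
    return dict(locals())  # {"location": "Chicago", "appid": "MY_APPID"}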
if __name__ == "__main__":
from pprint import pprint
while True:
__A = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 357 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__A = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
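# Worked example of one TO_CONVERT rule (self-contained; the sample line is
# invented): the tf dtype pattern rewrites raw TensorFlow dtypes into
# datasets.Value(...) calls.
def _demo_convert_rule(line="feature = tf.int64,"):
    return re.sub(r"tf\.([\w\d]+)", r"datasets.Value('\1')", line)  # -> "feature = datasets.Value('int64'),"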
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ) -> Tuple:
return ConvertCommand(args.tfds_path , args.datasets_directory )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
@staticmethod
def _snake_case ( _UpperCAmelCase ):
lowercase__: int = parser.add_parser(
'''convert''' , help='''Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.''' , )
train_parser.add_argument(
'''--tfds_path''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.''' , )
train_parser.add_argument(
'''--datasets_directory''' , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''Path to the HuggingFace Datasets folder.''' )
train_parser.set_defaults(func=_UpperCAmelCase )
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ):
lowercase__: List[str] = get_logger('''datasets-cli/converting''' )
lowercase__: Optional[Any] = tfds_path
lowercase__: Dict = datasets_directory
def _snake_case ( self ):
if os.path.isdir(self._tfds_path ):
lowercase__: Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
lowercase__: Optional[int] = os.path.dirname(self._tfds_path )
else:
raise ValueError('''--tfds_path is neither a directory nor a file. Please check path.''' )
lowercase__: int = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
lowercase__: Tuple = []
lowercase__: Dict = []
lowercase__: Any = {}
if os.path.isdir(self._tfds_path ):
lowercase__: Dict = os.listdir(_UpperCAmelCase )
else:
lowercase__: Dict = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
lowercase__: Tuple = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[int] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not os.path.isfile(_UpperCAmelCase ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('''Skipping file''' )
continue
with open(_UpperCAmelCase , encoding='''utf-8''' ) as f:
lowercase__: Tuple = f.readlines()
lowercase__: Optional[Any] = []
lowercase__: Dict = False
lowercase__: List[str] = False
lowercase__: List[Any] = []
for line in lines:
lowercase__: List[str] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowercase__: Optional[int] = '''import datasets\n'''
elif "import tensorflow" in out_line:
# order is important here
lowercase__: Dict = ''''''
continue
elif "from absl import logging" in out_line:
lowercase__: Tuple = '''from datasets import logging\n'''
elif "getLogger" in out_line:
lowercase__: Optional[Any] = out_line.replace('''getLogger''' , '''get_logger''' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
lowercase__: Any = True
lowercase__: str = list(filter(lambda _UpperCAmelCase : e in out_line , _UpperCAmelCase ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(_UpperCAmelCase ) + '''\n''' )
out_lines.append(_UpperCAmelCase )
out_lines.append(_UpperCAmelCase )
continue
else:
for pattern, replacement in TO_CONVERT:
lowercase__: List[Any] = re.sub(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowercase__: Any = re.match(r'''from\stensorflow_datasets.*import\s([^\.\r\n]+)''' , _UpperCAmelCase )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(''',''' ) )
lowercase__: List[str] = '''from . import ''' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowercase__: Optional[Any] = True
out_lines.append(_UpperCAmelCase )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowercase__: Dict = f_name.replace('''.py''' , '''''' )
lowercase__: Dict = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
lowercase__: Optional[Any] = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(_UpperCAmelCase )
if needs_manual_update:
with_manual_update.append(_UpperCAmelCase )
with open(_UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.writelines(_UpperCAmelCase )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
lowercase__: str = os.path.basename(_UpperCAmelCase )
lowercase__: Union[str, Any] = imports_to_builder_map[f_name.replace('''.py''' , '''''' )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(_UpperCAmelCase , _UpperCAmelCase )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 2 | 0 |
"""simple docstring"""
from collections.abc import Callable
def a__ ( SCREAMING_SNAKE_CASE : Callable[[float], float] , SCREAMING_SNAKE_CASE : float , SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
lowerCAmelCase : float = a
lowerCAmelCase : float = b
if function(SCREAMING_SNAKE_CASE ) == 0: # one of the a or b is a root for the function
return a
elif function(SCREAMING_SNAKE_CASE ) == 0:
return b
elif (
function(SCREAMING_SNAKE_CASE ) * function(SCREAMING_SNAKE_CASE ) > 0
): # if none of these are root and they are both positive or negative,
# then this algorithm can't find the root
raise ValueError("could not find root in given interval." )
else:
lowerCAmelCase : float = start + (end - start) / 2.0
while abs(start - mid ) > 1_0**-7: # until precisely equals to 10^-7
if function(SCREAMING_SNAKE_CASE ) == 0:
return mid
elif function(SCREAMING_SNAKE_CASE ) * function(SCREAMING_SNAKE_CASE ) < 0:
lowerCAmelCase : str = mid
else:
lowerCAmelCase : str = mid
lowerCAmelCase : List[Any] = start + (end - start) / 2.0
return mid
def a__ ( SCREAMING_SNAKE_CASE : float ):
'''simple docstring'''
return x**3 - 2 * x - 5
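# Self-contained sanity sketch for the bisection above. The reference value
# ~2.0945515 is the real root of x**3 - 2*x - 5, stated here as an assumption:
def _demo_bisection_root():
    f = lambda x: x**3 - 2 * x - 5
    lo, hi = 1.0, 3.0  # f(lo) < 0 < f(hi), so a root lies inside
    while hi - lo > 1e-7:
        mid = (lo + hi) / 2
        lo, hi = (mid, hi) if f(mid) < 0 else (lo, mid)
    assert abs(lo - 2.0945515) < 1e-5
    return lo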
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
| 108 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :List[str] = logging.get_logger(__name__)
_lowerCAmelCase :Any = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class _UpperCAmelCase ( a ):
'''simple docstring'''
a__ ='''falcon'''
a__ =['''past_key_values''']
def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any:
_UpperCAmelCase : int = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A )
_UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : Tuple = num_attention_heads
_UpperCAmelCase : Optional[int] = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[int] = use_cache
_UpperCAmelCase : Any = hidden_dropout
_UpperCAmelCase : Dict = attention_dropout
_UpperCAmelCase : Any = bos_token_id
_UpperCAmelCase : List[Any] = eos_token_id
_UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads
_UpperCAmelCase : Dict = alibi
_UpperCAmelCase : Optional[int] = new_decoder_architecture
_UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True
_UpperCAmelCase : Optional[int] = parallel_attn
_UpperCAmelCase : Optional[int] = bias
super().__init__(bos_token_id=A , eos_token_id=A , **A )
@property
def __lowerCAmelCase ( self ) -> List[str]:
return self.hidden_size // self.num_attention_heads
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return not self.alibi
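# Quick arithmetic check of the head_dim property above, using this config's
# defaults: a 4544-dim hidden state split across 71 heads gives 64 dims per head.
assert 4_544 // 71 == 64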
| 263 | 0 |
from __future__ import annotations
import bisect
def _a ( a :list[int] , a :int , a :int = 0 , a :int = -1 ) -> int:
if hi < 0:
a = len(a )
while lo < hi:
a = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
a = mid + 1
else:
a = mid
return lo
def _a ( a :list[int] , a :int , a :int = 0 , a :int = -1 ) -> int:
if hi < 0:
a = len(a )
while lo < hi:
a = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
a = mid + 1
else:
a = mid
return lo
def _a ( a :list[int] , a :int , a :int = 0 , a :int = -1 ) -> None:
sorted_collection.insert(bisect_left(a , a , a , a ) , a )
def _a ( a :list[int] , a :int , a :int = 0 , a :int = -1 ) -> None:
sorted_collection.insert(bisect_right(a , a , a , a ) , a )
def _a ( a :list[int] , a :int ) -> int | None:
a = 0
a = len(a ) - 1
while left <= right:
a = left + (right - left) // 2
a = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
a = midpoint - 1
else:
a = midpoint + 1
return None
def _a ( a :list[int] , a :int ) -> int | None:
a = bisect.bisect_left(a , a )
if index != len(a ) and sorted_collection[index] == item:
return index
return None
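# Illustrative contrast between the two bounds used above (self-contained):
# with duplicates, bisect_left returns the first insertion point and
# bisect_right the last, so together they bracket the run of equal items.
def _demo_bisect_bounds():
    data = [1, 2, 2, 2, 3]
    assert bisect.bisect_left(data, 2) == 1
    assert bisect.bisect_right(data, 2) == 4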
def _a ( a :list[int] , a :int , a :int , a :int ) -> int | None:
if right < left:
return None
a = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(a , a , a , midpoint - 1 )
else:
return binary_search_by_recursion(a , a , midpoint + 1 , a )
if __name__ == "__main__":
UpperCAmelCase__ = input("Enter numbers separated by comma:\n").strip()
UpperCAmelCase__ = sorted(int(item) for item in user_input.split(","))
UpperCAmelCase__ = int(input("Enter a single number to be found in the list:\n"))
UpperCAmelCase__ = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 352 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ = "▁"
UpperCAmelCase__ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertGenerationTokenizer
__snake_case = False
__snake_case = True
def __lowerCAmelCase ( self : str ) ->str:
"""simple docstring"""
super().setUp()
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
a = '''<s>'''
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def __lowerCAmelCase ( self : List[Any] ) ->str:
"""simple docstring"""
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(__UpperCAmelCase ) , 1_002 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
a = BertGenerationTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase )
a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(__UpperCAmelCase , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , [285, 46, 10, 170, 382] , )
a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18_536, 2_260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2_521,
452,
358,
1_357,
387,
7_751,
3_536,
112,
985,
456,
126,
865,
938,
5_400,
5_734,
458,
1_368,
467,
786,
2_462,
5_246,
1_159,
633,
865,
4_519,
457,
582,
852,
2_557,
427,
916,
508,
405,
34_324,
497,
391,
408,
11_342,
1_244,
385,
100,
938,
985,
456,
574,
362,
12_597,
3_200,
3_129,
1_172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False)
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a = {'''input_ids''': [[39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114], [448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db", )
| 26 | 0 |
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    """Predict the next day's total-user count with ordinary least squares."""
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Forecast the next value with a SARIMAX model (weekly seasonality)."""
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """Forecast the next value with an RBF-kernel support vector regressor."""
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
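# Toy illustration of the regressor above (made-up 2-feature inputs, not from
# the real dataset): fit on two days, predict a single held-out day.
# support_vector_regressor([[5.0, 2.0], [6.0, 3.0]], [[7.0, 4.0]], [2.0, 3.0])
# returns one float prediction for the held-out day.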
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower safety limit derived from the interquartile range."""
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
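# Worked example for the limit above (hypothetical numbers): for
# train_user = [1, 2, ..., 10], q1 = 3.25 and q3 = 7.75, so iqr = 4.5 and the
# returned limit is 3.25 - 0.45 = 2.8.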
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    """Vote on whether today's actual result looks safe given the forecasts."""
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
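# Quick illustration with made-up forecasts (not part of the original data):
# data_safety_checker([0.45, 0.50, 0.55], 0.52) -> True, because two of the
# three votes are within 0.1 of the actual value and do not exceed it.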
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(data_input, columns=["total_user", "total_even", "days"])
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(trn_date, trn_user, trn_match, tst_date, tst_match),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 1 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: all primes strictly below ``max_number``."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
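# Sanity check for the sieve (assumes calculate_prime_numbers above):
# calculate_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]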
def solution(base: int = 800800, degree: int = 800800) -> int:
    """Count hybrid integers p**q * q**p <= base**degree (Project Euler 800)."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 325 | 0 |
"""simple docstring"""
def lowercase__ ( _UpperCAmelCase ) -> list:
'''simple docstring'''
lowercase : Any = len(_UpperCAmelCase )
for _ in range(_UpperCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
lowercase , lowercase : int = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
_UpperCamelCase: Optional[int] = list(range(1_0, 0, -1))
print(f'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 53 |
"""simple docstring"""
import os
from pathlib import Path
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / "kernels" / "deformable_detr"
    src_files = [
        root / filename
        for filename in [
            "vision.cpp",
            os.path.join("cpu", "ms_deform_attn_cpu.cpp"),
            os.path.join("cuda", "ms_deform_attn_cuda.cu"),
        ]
    ]
    load(
        "MultiScaleDeformableAttention",
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=["-DWITH_CUDA=1"],
        extra_cuda_cflags=[
            "-DCUDA_HAS_FP16=1",
            "-D__CUDA_NO_HALF_OPERATORS__",
            "-D__CUDA_NO_HALF_CONVERSIONS__",
            "-D__CUDA_NO_HALF2_OPERATORS__",
        ],
    )
    import MultiScaleDeformableAttention as MSDA

    return MSDA
| 53 | 1 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right, replacing -100 labels by the pad id."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
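# Worked example (hypothetical ids; 0 doubles as the pad and decoder-start id):
# shift_tokens_right(jnp.array([[5, 6, -100]]), 0, 0) -> [[0, 5, 6]]
# The last label is shifted out, and any remaining -100 becomes the pad id.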
class FlaxMTaModel(FlaxTaModel):
    r"""Flax mT5 model: a ``FlaxTaModel`` with the mT5 configuration."""

    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    r"""Flax mT5 encoder-only model."""

    model_type = "mt5"
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    r"""Flax mT5 model with a language-modeling head."""

    model_type = "mt5"
    config_class = MTaConfig
| 186 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 186 | 1 |
"""Interpolation search on an ascending sorted collection of integers."""


def interpolation_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative interpolation search; returns the index of ``item`` or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
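# Example of the interpolation probe (assumes the sorted list shown below): in
# [10, 30, 40, 45, 50, 66, 77, 93] a search for 45 first probes
# point = 0 + (45 - 10) * 7 // (93 - 10) = 2, then narrows to index 3 in one
# more step instead of always splitting in the middle.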
def _a( UpperCamelCase__ : int, UpperCamelCase__ : int, UpperCamelCase__ : Tuple, UpperCamelCase__ : List[str] ):
'''simple docstring'''
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
SCREAMING_SNAKE_CASE__ : Tuple =left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase__, UpperCamelCase__, point + 1, UpperCamelCase__ )
def _a( UpperCamelCase__ : Union[str, Any] ):
'''simple docstring'''
if collection != sorted(UpperCamelCase__ ):
raise ValueError('''Collection must be ascending sorted''' )
return True
if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n  Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n  Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n  Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n  Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n  Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n  Kern, Robert and Larson, Eric and Carey, C J and\n  Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n  {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n  Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n  Harris, Charles R. and Archibald, Anne M. and\n  Ribeiro, Antonio H. and Pedregosa, Fabian and\n  {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n  Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                } ), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"], )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = "\n    Examples:\n        ```py\n        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n        >>> import torch\n\n        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n        >>> pipe_prior.to(\"cuda\")\n\n        >>> prompt = \"red cat, 4k photo\"\n        >>> out = pipe_prior(prompt)\n        >>> image_emb = out.image_embeds\n        >>> negative_image_emb = out.negative_image_embeds\n\n        >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n        >>> pipe.to(\"cuda\")\n\n        >>> image = pipe(\n        ...     prompt,\n        ...     image_embeds=image_emb,\n        ...     negative_image_embeds=negative_image_emb,\n        ...     height=768,\n        ...     width=768,\n        ...     num_inference_steps=100,\n        ... ).images\n\n        >>> image[0].save(\"cat.png\")\n        ```\n"
def get_new_h_w(h, w, scale_factor=8):
    """Map pixel dimensions to latent dimensions, rounding up to the next block."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
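# Worked example: get_new_h_w(768, 768, scale_factor=8) returns (96, 96),
# since 768 // 8**2 = 12 latent blocks of 8, so a 768x768 image maps to a
# 96x96 latent grid; non-multiples are rounded up.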
class KandinskyPipeline(DiffusionPipeline):
    """Text-to-image generation pipeline for Kandinsky 2.1."""

    def __init__(self, text_encoder: MultilingualCLIP, tokenizer: XLMRobertaTokenizer, unet: UNetaDConditionModel, scheduler: Union[DDIMScheduler, DDPMScheduler], movq: VQModel):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq, )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
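    # Note on prepare_latents: the initial noise is scaled by
    # scheduler.init_noise_sigma so the first denoising step sees noise at the
    # magnitude the scheduler expects.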
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", truncation=True, max_length=77, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}")
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask)
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}.")
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`.")
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens, padding="max_length", max_length=77, truncation=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1)
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(self, prompt, image_embeds, negative_image_embeds, negative_prompt=None, height=512, width=512, num_inference_steps=100, guidance_scale=4.0, num_images_per_prompt=1, generator=None, latents=None, output_type="pil", return_dict=True):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width), text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler, )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states, added_cond_kwargs=added_cond_kwargs, return_dict=False, )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred, t, latents, generator=generator, ).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 299 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
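# Small sanity check (hypothetical call): betas_for_alpha_bar(4) returns four
# betas in (0, 0.999] that grow toward the end of the schedule, since each
# beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i) for the cosine alpha_bar.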
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Second-order (KDPM2 / DPM-Solver-2 style) discrete scheduler."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas=None, prediction_type: str = "epsilon", timestep_spacing: str = "linspace", steps_offset: int = 0):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.float32)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(np.float32)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.")
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]])
        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)
        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()
        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])
        self.sample = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()
        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(self, model_output, timestep, sample, return_dict=True) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`")
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat
            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            sample = self.sample
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)
        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self) -> int:
return self.config.num_train_timesteps
| 119 | 0 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints into fast (tokenizers-based) ones."""
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"Loading tokenizer classes: {tokenizer_names}")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(f"=> File names {file_names}")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 204 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Map an LDM VAE state dict onto the diffusers AutoencoderKL layout."""
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight")
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias")
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str) -> None:
    """Converts a Stable Diffusion VAE .pt/.safetensors checkpoint to the diffusers format."""
    # Only supports V1; fetch the reference inference config
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE .pt/.safetensors checkpoint to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output directory for the converted diffusers model.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
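A hedged usage sketch (added for illustration, not part of the original script): invoking the converter above from Python rather than the CLI. The checkpoint path and output directory below are assumptions, not values from the source.

from diffusers import AutoencoderKL

vae_pt_to_vae_diffuser("./vae.pt", "./vae_diffusers")          # hypothetical paths
vae = AutoencoderKL.from_pretrained("./vae_diffusers")          # reload the converted weights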
| 204 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch) -> None:
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
assert result == expected | 6 |
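A small illustrative sketch (added) of the behavior the parametrized test above exercises: is_small_dataset compares a byte count against datasets.config.IN_MEMORY_MAX_SIZE, and a cap of 0 disables in-memory loading. The 500 MiB cap is an assumed example value.

import datasets.config
from datasets.utils.info_utils import is_small_dataset

datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20   # example cap: 500 MiB
print(is_small_dataset(400 * 2**20))               # True: below the cap
print(is_small_dataset(600 * 2**20))               # False: above the cap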
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarizes a grayscale PIL image using the (integer) mean pixel value as threshold."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
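An equivalent vectorized sketch (added), assuming numpy is available; it matches the loop-based version above, including the integer rounding of the mean.

import numpy as np

def mean_threshold_np(image: Image) -> Image:
    arr = np.asarray(image, dtype=np.int64)   # grayscale pixel values
    cutoff = arr.sum() // arr.size            # integer mean, like the loop version
    return Image.fromarray(np.where(arr > cutoff, 255, 0).astype(np.uint8))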
| 369 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs
    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()
@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )

        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
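A minimal end-to-end sketch (added), mirroring what the slow tests above exercise; it assumes a CUDA GPU and downloads the public "damo-vilab/text-to-video-ms-1.7b" weights.

import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
generator = torch.Generator(device="cpu").manual_seed(0)
frames = pipe("Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt").frames
print(frames.shape)  # frame tensor, e.g. (num_frames, channels, height, width)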
| 122 | 0 |
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Normalizes a bounding box to the 0-1000 scale used by LayoutLM-style models."""
return [
int(1_000 * (box[0] / width) ),
int(1_000 * (box[1] / height) ),
int(1_000 * (box[2] / width) ),
int(1_000 * (box[3] / height) ),
]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    r"""Constructs an image processor for document images, with optional Tesseract OCR."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
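A hedged usage sketch (added), assuming the class above is transformers' LayoutLMv2 image processor; apply_ocr=True requires the pytesseract backend, and "document.png" is an illustrative path.

from PIL import Image

processor = LayoutLMv2ImageProcessor()
encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
print(encoding.pixel_values.shape)      # channel-first, resized to 224x224 by default
print(encoding.words, encoding.boxes)   # OCR words and 0-1000 normalized boxes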
| 30 |
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Swaps array[index1] and array[index2] if they are out of order for the given direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    """Recursively merges a bitonic sequence into a sorted one."""
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    """Sorts array[low:low+length] in the given direction; length must be a power of two."""
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
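A small sanity-check sketch (added): bitonic sort only handles sequences whose length is a power of two, which the interactive driver above does not validate.

example = [12, 42, -21, 17, 23, 18, 4, 26]  # length 8 == 2**3
bitonic_sort(example, 0, len(example), 1)   # direction 1 == ascending
assert example == sorted(example)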
| 30 | 1 |
import operator as op
SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]
STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
"nnodes",
"nproc_per_node",
"rdzv_backend",
"rdzv_endpoint",
"rdzv_id",
"rdzv_conf",
"standalone",
"max_restarts",
"monitor_interval",
"start_method",
"role",
"module",
"m",
"no_python",
"run_path",
"log_dir",
"r",
"redirects",
"t",
"tee",
"node_rank",
"master_addr",
"master_port",
]
CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
_lowercase: Tuple = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
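An illustrative sketch (added) of how the string-to-operator map above (STR_OPERATION_TO_FUNC) is typically used: comparing parsed version strings against the pinned minimums defined in this module.

from packaging import version

def compare_versions(current: str, operation: str, required: str) -> bool:
    return STR_OPERATION_TO_FUNC[operation](version.parse(current), version.parse(required))

assert compare_versions("2.0.1", ">=", FSDP_PYTORCH_VERSION)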
| 359 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size, out_features=["stage1", "stage2", "stage3", "stage4"], )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels, num_labels=num_labels, id2label=id2label, label2id=label2id, )
    return config
def create_rename_keys(config):
    """Builds the list of (old, new) key renames for the Swin + UperNet checkpoint."""
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.norm2.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias''', f'''backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.stages.{i}.downsample.reduction.weight''', f'''backbone.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.weight''', f'''backbone.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.downsample.norm.bias''', f'''backbone.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    """Pops `old` from the state dict and re-inserts its value under `new`."""
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-swin-tiny",
type=str,
choices=[F"""upernet-swin-{size}""" for size in ["tiny", "small", "base", "large"]],
help="Name of the Swin + UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
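A small round-trip sketch (added): the reverse_* helpers undo the channel reordering that correct_* applies to Swin's patch-merging ("unfold") weights, so composing the two is the identity; this is the property the downsample fix-up above relies on.

t = torch.arange(16.0).reshape(2, 8)
assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(t)), t)
assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(torch.arange(8.0))), torch.arange(8.0))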
| 71 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
@slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        mmeta, metadata = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 338 | import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
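A hedged round-trip sketch (added), assuming the class above corresponds to transformers' AlbertTokenizer; "albert-base-v2" is one of the checkpoints listed in the pretrained map.

from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = tokenizer.encode("sequence builders")                 # adds [CLS] ... [SEP]
print(tokenizer.decode(ids, skip_special_tokens=True))      # "sequence builders"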
| 338 | 1 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCamelCase = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class _snake_case (A__ , unittest.TestCase):
__A : Tuple =DebertaVaTokenizer
__A : List[str] =DebertaVaTokenizerFast
__A : Tuple =True
__A : List[Any] =True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ : Dict = DebertaVaTokenizer(lowerCamelCase__ ,unk_token="<unk>" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self ,_snake_case ):
UpperCAmelCase_ : str = "this is a test"
UpperCAmelCase_ : Optional[int] = "this is a test"
return input_text, output_text
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = "<pad>"
UpperCAmelCase_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase__ ) ,lowerCamelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase__ ) ,lowerCamelCase__ )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"<unk>" )
self.assertEqual(vocab_keys[-1] ,"[PAD]" )
self.assertEqual(len(lowerCamelCase__ ) ,3_00_01 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Any = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : List[Any] = ["▁hello", "!", "how", "▁are", "▁you", "?"]
# fmt: on
UpperCAmelCase_ : str = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ )
UpperCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : Dict = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ )
UpperCAmelCase_ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
@unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one." )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Tuple = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : int = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : str = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : int = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Dict = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Any = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : Union[str, Any] = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : List[str] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : List[Any] = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
# fmt: on
UpperCAmelCase_ : Optional[int] = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : List[Any] = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : Optional[int] = " \tHeLLo!how \n Are yoU? "
UpperCAmelCase_ : Dict = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
# fmt: on
UpperCAmelCase_ : Tuple = DebertaVaTokenizer(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : Tuple = DebertaVaTokenizerFast(lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,split_by_punct=lowerCamelCase__ )
UpperCAmelCase_ : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_tokenizer()
UpperCAmelCase_ : Union[str, Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Optional[Any] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ) )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : Optional[Any] = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
UpperCAmelCase_ : Dict = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : List[str] = self.get_rust_tokenizer()
UpperCAmelCase_ : Any = tokenizer.encode(lowerCamelCase__ )
UpperCAmelCase_ : str = rust_tokenizer.encode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = "This is a test"
UpperCAmelCase_ : str = [13, 1, 43_98, 25, 21, 12_89]
UpperCAmelCase_ : List[Any] = ["▁", "T", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : Dict = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
UpperCAmelCase_ : Tuple = DebertaVaTokenizer(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
UpperCAmelCase_ : Optional[Any] = DebertaVaTokenizerFast(lowerCamelCase__ ,keep_accents=lowerCamelCase__ )
UpperCAmelCase_ : List[Any] = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : int = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : List[Any] = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : List[Any] = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : str = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
# fmt: off
UpperCAmelCase_ : Optional[int] = "I was born in 92000, and this is falsé."
UpperCAmelCase_ : str = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCAmelCase_ : List[str] = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
UpperCAmelCase_ : Any = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
# fmt: on
UpperCAmelCase_ : str = tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : Any = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : List[Any] = tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : Any = rust_tokenizer.encode(lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : str = rust_tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
UpperCAmelCase_ : List[Any] = rust_tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ ,lowerCamelCase__ )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = DebertaVaTokenizer(lowerCamelCase__ )
UpperCAmelCase_ : Any = tokenizer.encode("sequence builders" )
UpperCAmelCase_ : List[str] = tokenizer.encode("multi-sequence build" )
UpperCAmelCase_ : List[str] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ )
UpperCAmelCase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCamelCase__ ,lowerCamelCase__ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,lowerCamelCase__ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,lowerCamelCase__ ,)
@slow
def UpperCamelCase__ ( self ):
# fmt: off
UpperCAmelCase_ : int = {"input_ids": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase__ ,model_name="microsoft/deberta-v2-xlarge" ,revision="ad6e42c1532ddf3a15c39246b63f5559d558b670" ,)
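# For reference, the special-token layout verified by the sequence-builder test above:
#   single sequence: [CLS] text [SEP]
#   sequence pair:   [CLS] text [SEP] text_a [SEP]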
| 356 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_lowerCamelCase = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
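# Usage sketch: AudioFolder backs `load_dataset("audiofolder", ...)`. The directory
# layout below is an illustrative assumption (labels inferred from folder names):
#   my_audio/train/dog/bark_1.wav
#   my_audio/train/cat/meow_1.wav
# from datasets import load_dataset
# dataset = load_dataset("audiofolder", data_dir="my_audio")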
| 67 | 0 |
"""simple docstring"""
import functools
def mincost_tickets(days, costs):
    """Minimum total cost to cover every travel day with 1-, 7- and 30-day passes."""

    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
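# Worked example (standard "minimum cost for tickets" problem): for travel days
# [1, 4, 6, 7, 8, 20] and pass costs [2, 7, 15] (1-/7-/30-day), the optimum is
# 2 + 7 + 2 = 11: a 1-day pass on day 1, a 7-day pass covering days 4-10,
# and a 1-day pass on day 20.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11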
if __name__ == "__main__":
import doctest
doctest.testmod()
| 153 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        acta = get_activation("gelu")
        acta.a = 1
        actb = get_activation("gelu")
        # each call must return a fresh instance, so an attribute set on one
        # activation object must not be visible on the other
        self.assertEqual(acta.a, 1)
        with self.assertRaises(AttributeError):
            _ = actb.a
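# Rough behavioural sketch of "gelu_10" (not the actual implementation): it acts
# like plain GELU but saturates the output at 10, which is what the masked
# allclose comparison in test_gelu_10 checks:
#   y = torch.clamp(torch.nn.functional.gelu(x), max=10.0)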
| 153 | 1 |
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    """Wraps a Speech2Text feature extractor and a Speech2Text tokenizer into a single processor."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Forward the `audio` argument to the feature extractor and `text` to the tokenizer."""
        # for backward compatibility with `as_target_processor`
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Temporarily use the tokenizer for processing targets (deprecated)."""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
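# Usage sketch (checkpoint name is illustrative): `audio` is routed to the feature
# extractor and `text` to the tokenizer, exactly as __call__ above implements.
# processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
# inputs = processor(audio=raw_speech, sampling_rate=16_000, text="transcript")
# # -> feature-extractor outputs plus inputs["labels"] holding the tokenized text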
| 237 |
'''simple docstring'''
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
__UpperCAmelCase =[
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
for attribute in key.split('''.''' ):
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
__lowerCamelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
__lowerCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__lowerCamelCase = value
elif weight_type == "weight_g":
__lowerCamelCase = value
elif weight_type == "weight_v":
__lowerCamelCase = value
elif weight_type == "bias":
__lowerCamelCase = value
else:
__lowerCamelCase = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
__lowerCamelCase = []
__lowerCamelCase = fairseq_model.state_dict()
__lowerCamelCase = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
__lowerCamelCase = True
else:
for key, mapped_key in MAPPING.items():
__lowerCamelCase = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
# special case since naming is very similar
continue
__lowerCamelCase = True
if "*" in mapped_key:
__lowerCamelCase = name.split(UpperCamelCase__ )[0].split('''.''' )[-2]
__lowerCamelCase = mapped_key.replace('''*''' , UpperCamelCase__ )
if "weight_g" in name:
__lowerCamelCase = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase = '''weight_v'''
elif "bias" in name:
__lowerCamelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase = '''weight'''
else:
__lowerCamelCase = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
__lowerCamelCase = full_name.split('''conv_layers.''' )[-1]
__lowerCamelCase = name.split('''.''' )
__lowerCamelCase = int(items[0] )
__lowerCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" )
__lowerCamelCase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True ) -> Optional[Any]:
if config_path is not None:
__lowerCamelCase = UniSpeechSatConfig.from_pretrained(UpperCamelCase__ )
else:
__lowerCamelCase = UniSpeechSatConfig()
__lowerCamelCase = ''''''
if is_finetuned:
__lowerCamelCase = UniSpeechSatForCTC(UpperCamelCase__ )
else:
__lowerCamelCase = UniSpeechSatForPreTraining(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__lowerCamelCase = model[0].eval()
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ )
hf_wavavec.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__UpperCAmelCase =parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
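# Example invocation (all paths are placeholders):
# python convert_unispeech_sat_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path /path/to/unispeech_sat.pt \
#     --dict_path /path/to/dict \
#     --pytorch_dump_folder_path ./unispeech-sat-hf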
| 237 | 1 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample
        (squared Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # the winner is the cluster at the smaller distance
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[float]], sample: list[int], j: int, alpha: float
    ) -> list[list[float]]:
        """Move the winning vector j towards the sample with learning rate alpha."""
        for i in range(len(sample)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')
# running the main() function
if __name__ == "__main__":
main()
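# Worked example of get_winner with the initial weights and the test sample
# [0, 0, 0, 1] (before any training):
#   d0 = 0.2**2 + 0.6**2 + 0.5**2 + (1 - 0.9)**2 = 0.04 + 0.36 + 0.25 + 0.01 = 0.66
#   d1 = 0.8**2 + 0.4**2 + 0.7**2 + (1 - 0.3)**2 = 0.64 + 0.16 + 0.49 + 0.49 = 1.78
# so the untrained map would assign the sample to cluster 0 (the smaller distance).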
| 83 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case_ : Dict = logging.get_logger(__name__)
class lowercase__ ( lowercase ):
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : float ,**lowerCamelCase__ : int ):
'''simple docstring'''
_UpperCamelCase : List[Any] = feature_size
_UpperCamelCase : Any = sampling_rate
_UpperCamelCase : Optional[Any] = padding_value
_UpperCamelCase : Union[str, Any] = kwargs.pop('padding_side' ,'right' )
_UpperCamelCase : Dict = kwargs.pop('return_attention_mask' ,lowerCamelCase__ )
super().__init__(**lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] ,lowerCamelCase__ : Union[bool, str, PaddingStrategy] = True ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,):
'''simple docstring'''
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase__ ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
_UpperCamelCase : int = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
_UpperCamelCase : List[Any] = processed_features[self.model_input_names[0]]
_UpperCamelCase : Dict = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase__ ) == 0:
if return_attention_mask:
_UpperCamelCase : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_UpperCamelCase : List[str] = required_input[0]
if isinstance(lowerCamelCase__ ,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
_UpperCamelCase : List[str] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase__ ):
_UpperCamelCase : Dict = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase__ ):
_UpperCamelCase : Any = 'tf'
elif is_torch_tensor(lowerCamelCase__ ):
_UpperCamelCase : Optional[int] = 'pt'
elif isinstance(lowerCamelCase__ ,(int, float, list, tuple, np.ndarray) ):
_UpperCamelCase : int = 'np'
else:
raise ValueError(
F'type of {first_element} unknown: {type(lowerCamelCase__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
_UpperCamelCase : Any = to_numpy(lowerCamelCase__ )
else:
_UpperCamelCase : Any = [to_numpy(lowerCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
_UpperCamelCase : Optional[int] = self._get_padding_strategies(padding=lowerCamelCase__ ,max_length=lowerCamelCase__ )
_UpperCamelCase : str = processed_features[self.model_input_names[0]]
_UpperCamelCase : List[str] = len(lowerCamelCase__ )
if not all(len(lowerCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
_UpperCamelCase : List[str] = []
for i in range(lowerCamelCase__ ):
_UpperCamelCase : List[str] = {k: v[i] for k, v in processed_features.items()}
# truncation
_UpperCamelCase : List[str] = self._truncate(
lowerCamelCase__ ,max_length=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,truncation=lowerCamelCase__ ,)
truncated_inputs.append(lowerCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_UpperCamelCase : Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_UpperCamelCase : Any = PaddingStrategy.MAX_LENGTH
_UpperCamelCase : Optional[Any] = {}
for i in range(lowerCamelCase__ ):
# padding
_UpperCamelCase : Any = self._pad(
truncated_inputs[i] ,max_length=lowerCamelCase__ ,padding_strategy=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,)
for key, value in outputs.items():
if key not in batch_outputs:
_UpperCamelCase : Dict = []
if value.dtype is np.dtype(np.floataa ):
_UpperCamelCase : Any = value.astype(np.floataa )
batch_outputs[key].append(lowerCamelCase__ )
return BatchFeature(lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_UpperCamelCase : Optional[Any] = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_UpperCamelCase : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_UpperCamelCase : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_UpperCamelCase : Tuple = np.ones(len(lowerCamelCase__ ) ,dtype=np.intaa )
if needs_to_be_padded:
_UpperCamelCase : Dict = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
_UpperCamelCase : Optional[int] = np.pad(
processed_features['attention_mask'] ,(0, difference) )
_UpperCamelCase : Union[str, Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_UpperCamelCase : List[Any] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_UpperCamelCase : List[Any] = np.pad(
processed_features['attention_mask'] ,(difference, 0) )
_UpperCamelCase : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_UpperCamelCase : List[str] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
_UpperCamelCase : int = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_UpperCamelCase : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_UpperCamelCase : Optional[int] = len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
_UpperCamelCase : Dict = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_UpperCamelCase : Optional[Any] = processed_features['attention_mask'][:max_length]
return processed_features
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : int=False ,lowerCamelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Get padding strategy
if padding is not False:
if padding is True:
_UpperCamelCase : Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Tuple = PaddingStrategy(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = padding
else:
_UpperCamelCase : List[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
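# Minimal sketch of the right-side padding performed by _pad above (values are
# illustrative): padding a length-3 mono feature sequence to max_length=5 with
# padding_value=0.0 produces
#   input_values   : [0.1, 0.2, 0.3, 0.0, 0.0]
#   attention_mask : [1, 1, 1, 0, 0]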
| 83 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : List[Any] = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Dict = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Any = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : int = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[str] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
lowerCamelCase_ : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
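# The _LazyModule above defers the heavy framework imports: for example,
# `from transformers.models.bert import BertModel` only loads the torch-dependent
# modeling code at attribute-access time, and a missing backend surfaces as an
# OptionalDependencyNotAvailable-style error instead of failing at package import.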
| 363 |
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (adjacency dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
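# Quick usage examples for check_cycle:
# check_cycle({0: [1], 1: [2], 2: [0]})  # True: back edge 2 -> 0 closes a cycle
# check_cycle({0: [1], 1: [2], 2: []})   # False: the graph is a DAG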
| 14 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , _A = 16 , _A = 88 , _A = None , _A = 1 , _A = 0.0 , _A = 32 , _A = None , _A = False , _A = None , _A = None , _A = "geglu" , _A = None , ):
'''simple docstring'''
super().__init__()
__SCREAMING_SNAKE_CASE = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_A , attention_head_dim=_A , in_channels=_A , num_layers=_A , dropout=_A , norm_num_groups=_A , cross_attention_dim=_A , attention_bias=_A , sample_size=_A , num_vector_embeds=_A , activation_fn=_A , num_embeds_ada_norm=_A , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__SCREAMING_SNAKE_CASE = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__SCREAMING_SNAKE_CASE = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__SCREAMING_SNAKE_CASE = [1, 0]
def _A ( self , _A , _A , _A=None , _A=None , _A=None , _A = True , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = hidden_states
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__SCREAMING_SNAKE_CASE = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__SCREAMING_SNAKE_CASE = self.transformer_index_for_condition[i]
__SCREAMING_SNAKE_CASE = self.transformers[transformer_index](
_A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , return_dict=_A , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__SCREAMING_SNAKE_CASE = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__SCREAMING_SNAKE_CASE = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_A )
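# Shape sketch of the dual routing above (numbers follow the defaults set in
# __init__): encoder_hidden_states of shape (batch, 77 + 257, dim) is split into
# a 77-token text condition and a 257-token image condition;
# transformer_index_for_condition = [1, 0] sends the text slice through
# transformers[1] and the image slice through transformers[0], after which the
# two residuals are blended as 0.5 * enc0 + 0.5 * enc1 + input_states.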
| 257 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : str ={
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Dict = '''unispeech-sat'''
def __init__( self , _A=32 , _A=768 , _A=12 , _A=12 , _A=3_072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.1 , _A=0.1 , _A=0.0_2 , _A=1e-5 , _A="group" , _A="gelu" , _A=(512, 512, 512, 512, 512, 512, 512) , _A=(5, 2, 2, 2, 2, 2, 2) , _A=(10, 3, 3, 3, 3, 2, 2) , _A=False , _A=128 , _A=16 , _A=False , _A=True , _A=0.0_5 , _A=10 , _A=2 , _A=0.0 , _A=10 , _A=0 , _A=320 , _A=2 , _A=0.1 , _A=100 , _A=256 , _A=256 , _A=0.1 , _A="mean" , _A=False , _A=False , _A=256 , _A=(512, 512, 512, 512, 1_500) , _A=(5, 3, 3, 1, 1) , _A=(1, 2, 3, 1, 1) , _A=512 , _A=0 , _A=1 , _A=2 , _A=504 , **_A , ):
'''simple docstring'''
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = num_clusters
__SCREAMING_SNAKE_CASE = do_stable_layer_norm
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__SCREAMING_SNAKE_CASE = num_codevectors_per_group
__SCREAMING_SNAKE_CASE = num_codevector_groups
__SCREAMING_SNAKE_CASE = contrastive_logits_temperature
__SCREAMING_SNAKE_CASE = feat_quantizer_dropout
__SCREAMING_SNAKE_CASE = num_negatives
__SCREAMING_SNAKE_CASE = codevector_dim
__SCREAMING_SNAKE_CASE = proj_codevector_dim
__SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def _A ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
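# Usage sketch: with the default conv_stride (5, 2, 2, 2, 2, 2, 2), the property
# above evaluates to
#   functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320
# i.e. the feature encoder emits one frame per 320 waveform samples (20 ms at 16 kHz).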
| 257 | 1 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """Return True if num reads the same forwards and backwards."""
    return str(num) == str(num)[::-1]


def sum_reverse(num: int) -> int:
    """Return num plus its digit-reversed value."""
    return int(num) + int(str(num)[::-1])


def solution(limit: int = 10000) -> int:
    """Count candidate Lychrel numbers below `limit`, using at most 50 reverse-and-add steps."""
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            # no palindrome reached within 50 iterations -> Lychrel candidate
            lychrel_nums.append(num)
    return len(lychrel_nums)
if __name__ == "__main__":
print(F"""{solution() = }""")
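# Worked example: 47 is not a Lychrel candidate, since one reverse-and-add step
# gives 47 + 74 = 121, a palindrome; 196 never reaches a palindrome within the
# 50-iteration cap above and is counted. The published Project Euler #55 answer
# for limit=10000 is 249.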
| 184 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy cost."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix, y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f'loss: {j} \t')  # printing the loss after every 100 iterations
    return theta
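# Illustrative single gradient step (made-up numbers): with theta = [0.0],
# x = [[1.0]] and y = [1], we get z = 0, h = sigmoid(0) = 0.5,
# gradient = 1.0 * (0.5 - 1) / 1 = -0.5, so theta becomes 0 - alpha * (-0.5)
# = 0.5 * alpha, i.e. the weight moves towards predicting the positive class.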
# In[68]:
if __name__ == "__main__":
_lowerCAmelCase = datasets.load_iris()
_lowerCAmelCase = iris.data[:, :2]
_lowerCAmelCase = (iris.target != 0) * 1
_lowerCAmelCase = 0.1
_lowerCAmelCase = logistic_reg(alpha, x, y, max_iterations=7_0000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return sigmoid_function(
np.dot(UpperCamelCase , UpperCamelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((_lowerCAmelCase) , (_lowerCAmelCase)) = (x[:, 0].min(), x[:, 0].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) = (x[:, 1].min(), x[:, 1].max())
((_lowerCAmelCase) , (_lowerCAmelCase)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_lowerCAmelCase = np.c_[xxa.ravel(), xxa.ravel()]
_lowerCAmelCase = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 184 | 1 |
"""simple docstring"""
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_a : int = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale a pixel bounding box to the 0-1000 coordinate system LayoutLM-style models expect."""
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
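# Worked example for normalize_box: the pixel box [10, 20, 30, 40] in a
# 200 x 100 (width x height) image maps to the 0-1000 coordinate system as
# [1000*10/200, 1000*20/100, 1000*30/200, 1000*40/100] = [50, 200, 150, 400].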
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["pixel_values"]
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BILINEAR , a__ = True , a__ = None , a__ = "" , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : Tuple = size if size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : Dict = get_size_dict(a__ )
_lowerCAmelCase : int = do_resize
_lowerCAmelCase : Optional[int] = size
_lowerCAmelCase : Optional[Any] = resample
_lowerCAmelCase : List[Any] = apply_ocr
_lowerCAmelCase : Dict = ocr_lang
_lowerCAmelCase : Tuple = tesseract_config
def __A ( self , a__ , a__ , a__ = PILImageResampling.BILINEAR , a__ = None , **a__ , ):
_lowerCAmelCase : Dict = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
_lowerCAmelCase : Optional[int] = (size["""height"""], size["""width"""])
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCAmelCase : str = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : str = size if size is not None else self.size
_lowerCAmelCase : Optional[Any] = get_size_dict(a__ )
_lowerCAmelCase : List[Any] = resample if resample is not None else self.resample
_lowerCAmelCase : Any = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowerCAmelCase : Dict = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowerCAmelCase : int = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowerCAmelCase : str = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : Optional[int] = [to_numpy_array(a__ ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
_lowerCAmelCase : List[str] = []
_lowerCAmelCase : Optional[Any] = []
for image in images:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = apply_tesseract(a__ , a__ , a__ )
words_batch.append(a__ )
boxes_batch.append(a__ )
if do_resize:
_lowerCAmelCase : List[Any] = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowerCAmelCase : Union[str, Any] = [flip_channel_order(a__ ) for image in images]
_lowerCAmelCase : Tuple = [to_channel_dimension_format(a__ , a__ ) for image in images]
_lowerCAmelCase : Any = BatchFeature(data={"""pixel_values""": images} , tensor_type=a__ )
if apply_ocr:
_lowerCAmelCase : Optional[Any] = words_batch
_lowerCAmelCase : Optional[int] = boxes_batch
return data
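# Usage sketch (pytesseract must be installed when apply_ocr=True; the input
# image is illustrative). Instantiating the image processor class defined above:
# image_processor = __A(apply_ocr=True)
# encoding = image_processor(images=document_image, return_tensors="pt")
# # pixel_values are resized and channel-flipped to BGR; "words" and "boxes"
# # come from the Tesseract OCR pass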
| 44 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = """Hello world! cécé herlolip"""
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
A_ : List[Any] = FairseqRobertaModel.from_pretrained(SCREAMING_SNAKE_CASE )
roberta.eval() # disable dropout
A_ : Dict = roberta.model.encoder.sentence_encoder
A_ : Optional[Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
A_ : Optional[int] = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , SCREAMING_SNAKE_CASE )
A_ : List[str] = XLMRobertaXLForSequenceClassification(SCREAMING_SNAKE_CASE ) if classification_head else XLMRobertaXLForMaskedLM(SCREAMING_SNAKE_CASE )
model.eval()
# Now let's copy all the weights.
# Embeddings
A_ : str = roberta_sent_encoder.embed_tokens.weight
A_ : int = roberta_sent_encoder.embed_positions.weight
A_ : Union[str, Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
A_ : Union[str, Any] = roberta_sent_encoder.layer_norm.weight
A_ : int = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A_ : BertLayer = model.roberta.encoder.layer[i]
A_ : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
A_ : RobertaAttention = layer.attention
A_ : Dict = roberta_layer.self_attn_layer_norm.weight
A_ : str = roberta_layer.self_attn_layer_norm.bias
# self attention
A_ : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
A_ : str = roberta_layer.self_attn.q_proj.weight
A_ : List[str] = roberta_layer.self_attn.q_proj.bias
A_ : int = roberta_layer.self_attn.k_proj.weight
A_ : List[Any] = roberta_layer.self_attn.k_proj.bias
A_ : Dict = roberta_layer.self_attn.v_proj.weight
A_ : int = roberta_layer.self_attn.v_proj.bias
# self-attention output
A_ : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
A_ : Any = roberta_layer.self_attn.out_proj.weight
A_ : Optional[Any] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
A_ : Any = roberta_layer.final_layer_norm.weight
A_ : int = roberta_layer.final_layer_norm.bias
# intermediate
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape  # fairseq fc1 feeds the HF intermediate
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape  # fairseq fc2 feeds the HF output
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
A_ : str = roberta.model.classification_heads['''mnli'''].dense.weight
A_ : int = roberta.model.classification_heads['''mnli'''].dense.bias
A_ : str = roberta.model.classification_heads['''mnli'''].out_proj.weight
A_ : Dict = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
A_ : int = roberta.model.encoder.lm_head.dense.weight
A_ : List[str] = roberta.model.encoder.lm_head.dense.bias
A_ : Optional[Any] = roberta.model.encoder.lm_head.layer_norm.weight
A_ : int = roberta.model.encoder.lm_head.layer_norm.bias
A_ : Optional[int] = roberta.model.encoder.lm_head.weight
A_ : Dict = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
A_ : torch.Tensor = roberta.encode(SCREAMING_SNAKE_CASE ).unsqueeze(0 ) # batch of size 1
A_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE )[0]
if classification_head:
A_ : str = roberta.model.classification_heads['''mnli'''](roberta.extract_features(SCREAMING_SNAKE_CASE ) )
else:
A_ : int = roberta.model(SCREAMING_SNAKE_CASE )[0]
print(our_output.shape , their_output.shape )
A_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'''max_absolute_diff = {max_absolute_diff}''' ) # ~ 1e-7
A_ : Tuple = torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(SCREAMING_SNAKE_CASE ).mkdir(parents=SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE )
print(f'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
UpperCamelCase = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
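# Example invocation (script name and paths are placeholders):
# python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#     --roberta_checkpoint_path /path/to/xlm-roberta-xl \
#     --pytorch_dump_folder_path ./xlm-roberta-xl-hf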
| 186 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Optional[int] , SCREAMING_SNAKE_CASE :Optional[int]=False ) -> Union[str, Any]:
__lowerCAmelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :Union[str, Any] , SCREAMING_SNAKE_CASE :Optional[int]=False ) -> Union[str, Any]:
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase : Any = """"""
else:
__lowerCAmelCase : str = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase : Dict = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
__lowerCAmelCase : List[Any] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase : str = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase : Optional[int] = in_proj_bias[: config.hidden_size]
__lowerCAmelCase : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase : str = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase : List[str] = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase : List[str] = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :Any ) -> Any:
__lowerCAmelCase : List[Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def remove_projection_head(state_dict):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    # MSN checkpoints are converted into the base (headless) ViT model
    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    # IMAGENET_DEFAULT_MEAN / IMAGENET_DEFAULT_STD are expected to come from
    # transformers.image_utils, imported at the top of the script
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 232 |
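# Example invocation (illustrative; the checkpoint URL is the default declared
# above, while the script file name and the dump folder are assumptions):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small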
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase : Optional[Any] = {"""input_ids""": [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,  # the expected encoding dict defined just above
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
@require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        ) | 232 | 1 |
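# Illustrative sketch of the id layout the tests above rely on (the helper and
# the SentencePiece piece id used here are assumptions for illustration only):
# Pegasus reserves ids 0..offset-1 for special tokens (<pad>=0, </s>=1,
# <mask_1>=2, <mask_2>=3, plus <unk_N> placeholders) and shifts every
# SentencePiece piece id up by `offset`, which is the invariant behind
# `tokenizer.unk_token_id == tokenizer.offset + 2 == 105` asserted above.
def _sp_id_to_pegasus_id(sp_piece_id: int, offset: int = 103) -> int:
    return sp_piece_id + offset


assert _sp_id_to_pegasus_id(2) == 105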
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """
    Strand sort: repeatedly pull an increasing "strand" out of the input and
    merge it into the growing solution list.
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
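    # Worked example (illustrative): on [4, 3, 5, 1, 2] the first pass pulls the
    # increasing strand [4, 5] (solution: [4, 5]), the next pass pulls [3]
    # (merged to [3, 4, 5]), and the last pulls [1, 2] (merged to [1, 2, 3, 4, 5]).
    assert strand_sort([10, -1, 7, 7, 0]) == [-1, 0, 7, 7, 10]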
| 12 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f'{line["duration"]:.4f}'
                if line.get('outcome', '') == "failed":
                    section_num_failed += 1
                    failed.append([test, duration, log.name.split('_')[0]])
                    total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split('::')
                data[0] = data[0].split('/')[-1]
                if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
    if len(message) > 3_000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
    message = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
        date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
        ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
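# Minimal sketch (assumption: the *.log files consumed above come from pytest's
# --report-log option, which writes one JSON object per line). Shown only to
# illustrate the parsing loop, not as a replacement for the report script.
def collect_failures(log_path):
    failures = []
    for raw in log_path.read_text().splitlines():
        entry = json.loads(raw)
        if entry.get('nodeid') and entry.get('outcome') == 'failed':
            failures.append((entry['nodeid'], f"{entry.get('duration', 0.0):.4f}"))
    return failures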
| 12 | 1 |
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    """Print the upper half of the diamond."""
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()
def reverse_floyd(n):
    """Print the lower half of the diamond."""
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")
def pretty_print(n):
    """Print the full diamond, or a placeholder message when n is not positive."""
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...") | 31 |
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
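# Quick check (illustrative) of random_subsample: a 2-second clip at 16 kHz cut
# to max_length=1.0 seconds always comes back with exactly 16000 samples, taken
# from a random offset.
_demo_wav = np.zeros(32000, dtype=np.float32)
assert random_subsample(_demo_wav, max_length=1.0, sample_rate=16000).shape == (16000,)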
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.train_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        split=data_args.eval_split_name,
        use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path,
        return_attention_mask=model_args.attention_mask,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=raw_datasets["train"] if training_args.do_train else None,
        eval_dataset=raw_datasets["eval"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=feature_extractor,
    )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main() | 31 | 1 |
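# Example launch (illustrative; the dataset and value choices are assumptions,
# loosely following the argument names defined above):
#
#   python run_audio_classification.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name superb --dataset_config_name ks \
#       --output_dir ./wav2vec2-base-ft-keyword-spotting \
#       --do_train --do_eval --max_length_seconds 1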
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
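# Usage sketch (illustrative; the image path and question are assumptions):
#
#   from PIL import Image
#
#   tool = ImageQuestionAnsweringTool()
#   print(tool(image=Image.open("cats.png"), question="How many cats are there?"))
#
# PipelineTool.__call__ chains encode -> forward -> decode, so the call above
# returns the highest-scoring answer string from the VQA classification head.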
| 123 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 57 | 0 |
def counting_sort(collection: list) -> list:
    """Pure implementation of counting sort algorithm in Python."""
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string: str) -> str:
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(counting_sort(unsorted))
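    # Worked example (illustrative): for counting_sort([0, 5, 3, 2, 2]) the raw
    # counts over values 0..5 are [1, 0, 2, 1, 0, 1]; after the prefix sum they
    # become [1, 1, 3, 4, 4, 5], i.e. counting_arr[v] is the number of elements
    # <= v, which is exactly the 1-based final position of the last copy of v.
    assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]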
| 363 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
'tokenization_roberta': ['RobertaTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ['RobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'RobertaForCausalLM',
'RobertaForMaskedLM',
'RobertaForMultipleChoice',
'RobertaForQuestionAnswering',
'RobertaForSequenceClassification',
'RobertaForTokenClassification',
'RobertaModel',
'RobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRobertaForCausalLM',
'TFRobertaForMaskedLM',
'TFRobertaForMultipleChoice',
'TFRobertaForQuestionAnswering',
'TFRobertaForSequenceClassification',
'TFRobertaForTokenClassification',
'TFRobertaMainLayer',
'TFRobertaModel',
'TFRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
'FlaxRobertaForCausalLM',
'FlaxRobertaForMaskedLM',
'FlaxRobertaForMultipleChoice',
'FlaxRobertaForQuestionAnswering',
'FlaxRobertaForSequenceClassification',
'FlaxRobertaForTokenClassification',
'FlaxRobertaModel',
'FlaxRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
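# Minimal sketch (illustrative; names are assumptions) of the lazy-import
# pattern used above: the real _LazyModule replaces this module in sys.modules
# with a proxy that maps each public name to its submodule and only imports
# that submodule on first attribute access, keeping the top-level import cheap.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_submodule = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, attr):
        # resolved lazily: the submodule is only imported when `attr` is first requested
        submodule = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)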
| 177 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample will be retrieved by indexing the list of token_ids and their corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)
    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split by chunk of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f'Splitting {sum(indices)} too long sequences.')

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences (<=11 tokens) are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.')
    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).')
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process does this."""
        if not self.params.is_master:
            return
        logger.info(f'{len(self)} sequences')
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
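# Worked example (illustrative) of the chunking rule in remove_long_sequences:
# with max_model_input_size = 6, chunks of max_len - 2 = 4 tokens are cut and
# re-wrapped with the cls/sep ids so every piece stays a well-formed input.
def _demo_chunking(cls_id: int = 101, sep_id: int = 102, max_len: int = 6) -> None:
    seq = np.array([cls_id, 11, 12, 13, 14, 15, 16, 17, sep_id])
    fixed = []
    for start in range(0, len(seq), max_len - 2):
        sub = seq[start : start + max_len - 2]
        if sub[0] != cls_id:
            sub = np.insert(sub, 0, cls_id)
        if sub[-1] != sep_id:
            sub = np.insert(sub, len(sub), sep_id)
        fixed.append(sub)
    assert all(len(s) <= max_len and s[0] == cls_id and s[-1] == sep_id for s in fixed)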
| 25 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
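# Quick checks (illustrative) of the converters above:
assert camelcase_to_snakecase("SnakeCase") == "snake_case"
assert snakecase_to_camelcase("snake_case") == "SnakeCase"
assert filename_prefix_for_split("SomeDataset", "train") == "some_dataset-train"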
| 246 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
@torch.no_grad()
def __call__( self : Any , lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCamelCase__ : float = 0.8 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowerCamelCase__ )
# 2. Preprocess image
_UpperCAmelCase : Dict = preprocess(lowerCamelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device )
_UpperCAmelCase : List[Any] = timesteps[:1].repeat(lowerCamelCase__ )
# 4. Prepare latent variables
_UpperCAmelCase : Optional[int] = self.prepare_latents(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.unet.dtype , self.device , lowerCamelCase__ )
_UpperCAmelCase : Any = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCamelCase__ ):
# 1. predict noise model_output
_UpperCAmelCase : Union[str, Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , eta=lowerCamelCase__ , use_clipped_model_output=lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample
_UpperCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCamelCase__ )
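# --- Added illustration (hedged, standalone): how `strength` truncates the
# scheduler's timestep list in `get_timesteps` above. All names are local.
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
t_start = max(num_inference_steps - init_timestep, 0)
# strength 0.8 skips the first 10 of 50 timesteps and denoises for the last 40,
# so a higher strength means the image is noised further and denoised longer.
print(t_start, num_inference_steps - t_start)  # 10 40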
| 322 |
'''simple docstring'''
import pytest
DATASET_LOADING_SCRIPT_NAME = '__dummy_dataset1__'
DATASET_LOADING_SCRIPT_CODE = '\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"\nURLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n    def _info(self):\n        features = datasets.Features(\n            {\n                "tokens": datasets.Sequence(datasets.Value("string")),\n                "ner_tags": datasets.Sequence(\n                    datasets.features.ClassLabel(\n                        names=[\n                            "O",\n                            "B-PER",\n                            "I-PER",\n                            "B-ORG",\n                            "I-ORG",\n                            "B-LOC",\n                            "I-LOC",\n                        ]\n                    )\n                ),\n                "langs": datasets.Sequence(datasets.Value("string")),\n                "spans": datasets.Sequence(datasets.Value("string")),\n            }\n        )\n        return datasets.DatasetInfo(features=features)\n\n    def _split_generators(self, dl_manager):\n        dl_path = dl_manager.download(URLS)\n        return [\n            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),\n            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),\n        ]\n\n    def _generate_examples(self, filepath):\n        with open(filepath, "r", encoding="utf-8") as f:\n            for i, line in enumerate(f):\n                yield i, json.loads(line)\n'
@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / F"""{script_name}.py"""
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
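# --- Added illustration (hedged, standalone): a sketch of a test consuming the
# fixture above, assuming a `datasets` version that still supports loading scripts.
import datasets

def test_dummy_dataset_script_builds(dataset_loading_script_dir):
    # load_dataset_builder resolves the local loading script without downloading data
    builder = datasets.load_dataset_builder(dataset_loading_script_dir)
    assert builder.info is not None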
| 322 | 1 |
import os
import pytest
from attr import dataclass
__UpperCAmelCase = 'us-east-1' # defaults region
@dataclass
class SageMakerTestEnvironment:
'''simple docstring'''
    framework : str
    role = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
    hyperparameters = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 1_6,
'''per_device_eval_batch_size''': 1_6,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 5_0_0,
'''save_steps''': 5_5_0_0,
}
    distributed_hyperparameters = {**hyperparameters, '''max_steps''': 1_0_0_0}
@property
def __UpperCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __UpperCAmelCase ( self ) -> str:
return f"{self.framework}-transfromers-test"
@property
def __UpperCAmelCase ( self ) -> str:
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __UpperCAmelCase ( self ) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def lowercase__ ( request ):
    '''simple docstring'''
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework )
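# --- Added illustration (hedged, standalone): the PyTorch metric regexes above
# extract the numeric value from a Trainer-style log line.
import re
line = "eval_accuracy = 0.8423"
match = re.search(r"eval_accuracy.*=\D*(.*?)$", line)
assert match is not None and float(match.group(1)) == 0.8423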
| 29 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
        image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,patch_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SwinvaModelTest ( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> None:
        """simple docstring"""
        model = model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) ,expected_num_layers )
        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size ,collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) ,expected_num_layers )
        batch_size , num_channels , height , width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size ,num_channels ,height * width ).permute(0 ,2 ,1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
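# --- Added illustration (hedged, standalone): recomputing the shape checks in
# `create_and_check_model` above with the tester defaults (image_size=32,
# patch_size=2, embed_dim=16, depths=[1, 2, 1]).
image_size, patch_size, embed_dim, depths = 32, 2, 16, [1, 2, 1]
expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
expected_dim = int(embed_dim * 2 ** (len(depths) - 1))
print(expected_seq_len, expected_dim)  # 16 64: patches are merged 4x per stage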
| 16 | 0 |
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + """/grid.txt""" ) as f:
        l = []  # noqa: E741
        for _ in range(20 ):
            l.append([int(x ) for x in f.readline().split()] )
    maximum = 0
    # right
    for i in range(20 ):
        for j in range(17 ):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17 ):
        for j in range(20 ):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17 ):
        for j in range(17 ):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
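# --- Added illustration (hedged, standalone): an equivalent formulation of the
# four scans above using direction vectors; `grid` is any 20x20 list of ints.
def _max_product_of_four(grid):
    n, best = len(grid), 0
    for dr, dc in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, both diagonals
        for r in range(n):
            for c in range(n):
                if 0 <= r + 3 * dr < n and 0 <= c + 3 * dc < n:
                    product = 1
                    for k in range(4):
                        product *= grid[r + k * dr][c + k * dc]
                    best = max(best, product)
    return best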
if __name__ == "__main__":
print(solution())
| 360 |
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions: List of predicted texts.\n    references: List of reference texts.\n    regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n        ignore when calculating the exact matches. Note: these regexes are removed\n        from the input data before the changes based on the options below (e.g. ignore_case,\n        ignore_punctuation, ignore_numbers) are applied.\n    ignore_case: Boolean, defaults to False. If true, turns everything\n        to lowercase so that capitalization differences are ignored.\n    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n        comparing predictions and references.\n    ignore_numbers: Boolean, defaults to False. If true, removes all numbers before\n        comparing predictions and references.\nReturns:\n    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    25.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    50.0\n\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n    >>> print(round(results["exact_match"], 1))\n    75.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n    >>> preds = ["cat?", "theater", "yelling", "agent"]\n    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n    >>> print(round(results["exact_match"], 1))\n    100.0\n\n    >>> exact_match = datasets.load_metric("exact_match")\n    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n    >>> results = exact_match.compute(references=refs, predictions=preds)\n    >>> print(round(results["exact_match"], 1))\n    33.3\n\n'
_CITATION = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    def _info( self ) -> Optional[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , reference_urls=[] , )
    def _compute( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ) -> List[Any]:
        """simple docstring"""
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s , "" , x ) for x in predictions] )
                references = np.array([re.sub(s , "" , x ) for x in references] )
        else:
            predictions = np.asarray(predictions )
            references = np.asarray(references )
        if ignore_case:
            predictions = np.char.lower(predictions )
            references = np.char.lower(references )
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("" , "" , string.punctuation )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table = string.digits.maketrans("" , "" , string.digits )
            predictions = np.char.translate(predictions , table=repl_table )
            references = np.char.translate(references , table=repl_table )
        score_list = predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 183 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
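# --- Added illustration (hedged): how the mapping above, combined with the
# "_orig_mod." prefix strip done later in `_load_model`, rewrites one GPT-style
# checkpoint key into its HF Bark equivalent.
_example_key = "_orig_mod.transformer.h.0.attn.c_attn.weight"
_renamed = _example_key[len("_orig_mod.") :]
for _old, _new in new_layer_name_dict.items():
    _renamed = _renamed.replace(_old, _new)
print(_renamed)  # layers.0.attn.att_proj.weight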
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("""~"""), """.cache""")
CACHE_DIR = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def _get_ckpt_path( model_type , use_small=False ):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]["""file_name"""] )
def _download( from_hf_path , file_name ):
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )
def _load_model( ckpt_path , device , use_small=False , model_type="text" ):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    model_key = F'{model_type}_small' if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path ):
        logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
        _download(model_info["repo_id"] , model_info["file_name"] )
    checkpoint = torch.load(ckpt_path , map_location=device )
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head" )
    model_args["hidden_size"] = model_args.pop("n_embd" )
    model_args["num_layers"] = model_args.pop("n_layer" )
    model_config = ConfigClass(**checkpoint["model_args"] )
    model = ModelClass(config=model_config )
    model_generation_config = GenerationConfigClass()
    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items() ):
        if k.startswith(unwanted_prefix ):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix ) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name , new_layer_name_dict[old_layer_name] )
            state_dict[new_k] = state_dict.pop(k )
    extra_keys = set(state_dict.keys() ) - set(model.state_dict().keys() )
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias" )}
    missing_keys = set(model.state_dict().keys() ) - set(state_dict.keys() )
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(extra_keys ) != 0:
        raise ValueError(F'extra keys found: {extra_keys}' )
    if len(missing_keys ) != 0:
        raise ValueError(F'missing keys: {missing_keys}' )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(F'model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss' )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def load_model( pytorch_dump_folder_path , use_small=False , model_type="text" ):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    device = "cpu"  # do conversion on cpu
    ckpt_path = _get_ckpt_path(model_type , use_small=use_small )
    model = _load_model(ckpt_path , device , model_type=model_type , use_small=use_small )
    # load bark initial model
    bark_model = _bark_load_model(ckpt_path , "cpu" , model_type=model_type , use_small=use_small )
    if model_type == "text":
        bark_model = bark_model["model"]
    if model.num_parameters(exclude_embeddings=True ) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters" )
    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10
    if model_type in ["text", "coarse"]:
        vec = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        output_old_model = bark_model(vec )[0]
        output_new_model_total = model(vec )
        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        output_new_model_total = model(prediction_codebook_channel , vec )
        output_old_model = bark_model(prediction_codebook_channel , vec )
        output_new_model = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape" )
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
def load_whole_bark_model( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , "config.json" ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , "config.json" ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , "config.json" ) )
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz" )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 58 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize( sentence : str ) ->str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
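# --- Added illustration (hedged, standalone): only the first character is
# upper-cased; the rest of the sentence is untouched.
print(capitalize("hello world"))  # Hello world
print(capitalize("123 abc"))  # 123 abc (non-letters fall back to themselves)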
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58 | 1 |
'''simple docstring'''
def ugly_numbers(n: int) -> int:
    """simple docstring"""
    ugly_nums = [1]
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
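# --- Added illustration (hedged, standalone): the first ten values produced by
# the three-pointer merge above.
print([ugly_numbers(i) for i in range(1, 11)])  # [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]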
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(2_00) = }''')
| 25 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : str = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
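# --- Added note (hedged): with the lazy structure above, a submodule is only
# imported on first attribute access, e.g.
#   from transformers.models.resnet import ResNetConfig
# triggers the real import of `configuration_resnet` at that moment.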
| 25 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool ):
    """simple docstring"""
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVisionaSeq
    inputs = ["image"]
    outputs = ["text"]
    def __init__( self , *args , **kwargs ) -> None:
        requires_backends(self , ['vision'] )
        super().__init__(*args , **kwargs )
    def encode( self , image ) -> Optional[int]:
        return self.pre_processor(images=image , return_tensors='pt' )
    def forward( self , inputs ) -> Any:
        return self.model.generate(**inputs )
    def decode( self , outputs ) -> str:
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
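# --- Added illustration (hedged, standalone): using the tool above; "cat.png"
# is a placeholder file and the first call downloads the BLIP checkpoint.
from PIL import Image
tool = ImageCaptioningTool()
print(tool(Image.open("cat.png")))  # e.g. "a cat sitting on a sofa"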
| 62 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a_ = logging.get_logger(__name__)
a_ = {'vocab_file': 'vocab.txt'}
a_ = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a_ = {
'YituTech/conv-bert-base': 512,
'YituTech/conv-bert-medium-small': 512,
'YituTech/conv-bert-small': 512,
}
a_ = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =VOCAB_FILES_NAMES
lowerCamelCase__ =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ =PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ =ConvBertTokenizer
def __init__( self : List[str] , a : Union[str, Any]=None , a : Optional[int]=None , a : int=True , a : Tuple="[UNK]" , a : Dict="[SEP]" , a : Dict="[PAD]" , a : List[Any]="[CLS]" , a : Tuple="[MASK]" , a : Dict=True , a : Optional[Any]=None , **a : str , ) -> Dict:
"""simple docstring"""
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> Optional[Any]:
        """simple docstring"""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
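    # --- Added illustration (hedged): for token_ids_0=[7, 8], token_ids_1=[9]
    # with cls=[101] and sep=[102], the method above returns
    #     len([101, 7, 8, 102]) * [0] + len([9, 102]) * [1] == [0, 0, 0, 0, 1, 1]
    # i.e. zeros cover "[CLS] A [SEP]" and ones cover "B [SEP]".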
    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 76 | 0 |
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Tuple = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE :Optional[int] = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
SCREAMING_SNAKE_CASE :Dict = {
"""Salesforce/codegen-350M-mono""": 2048,
}
class __magic_name__ ( snake_case ):
UpperCamelCase_ :List[str] = VOCAB_FILES_NAMES
UpperCamelCase_ :Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ :List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ :List[str] = ["""input_ids""", """attention_mask"""]
UpperCamelCase_ :Dict = CodeGenTokenizer
def __init__( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase="<|endoftext|>" , _lowercase="<|endoftext|>" , _lowercase="<|endoftext|>" , _lowercase=False , **_lowercase , )-> Union[str, Any]:
super().__init__(
_lowercase , _lowercase , tokenizer_file=_lowercase , unk_token=_lowercase , bos_token=_lowercase , eos_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
if kwargs.pop("add_bos_token" , _lowercase ):
UpperCamelCase_ = kwargs.pop("name_or_path" , "" )
raise ValueError(
"Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
"Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
F"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
F"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
"This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
" so that the fast tokenizer works correctly." )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> BatchEncoding:
UpperCamelCase_ = kwargs.get("is_split_into_words" , _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*_lowercase , **_lowercase )
def UpperCAmelCase_ ( self , *_lowercase , **_lowercase )-> BatchEncoding:
UpperCamelCase_ = kwargs.get("is_split_into_words" , _lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs."
)
return super()._encode_plus(*_lowercase , **_lowercase )
def UpperCAmelCase_ ( self , _lowercase , _lowercase = None )-> Tuple[str]:
UpperCamelCase_ = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
def UpperCAmelCase_ ( self , _lowercase , _lowercase = False , _lowercase = None , _lowercase = None , **_lowercase , )-> str:
UpperCamelCase_ = super().decode(
token_ids=_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , **_lowercase , )
if truncate_before_pattern is not None and len(_lowercase ) > 0:
UpperCamelCase_ = self.truncate(_lowercase , _lowercase )
return decoded_text
def UpperCAmelCase_ ( self , _lowercase , _lowercase )-> Tuple:
def find_re(_lowercase , _lowercase , _lowercase ):
UpperCamelCase_ = pattern.search(_lowercase , _lowercase )
return m.start() if m else -1
UpperCamelCase_ = [re.compile(_lowercase , re.MULTILINE ) for pattern in truncate_before_pattern]
UpperCamelCase_ = list(re.finditer("^print" , _lowercase , re.MULTILINE ) )
if len(_lowercase ) > 1:
UpperCamelCase_ = completion[: prints[1].start()]
UpperCamelCase_ = list(re.finditer("^def" , _lowercase , re.MULTILINE ) )
if len(_lowercase ) > 1:
UpperCamelCase_ = completion[: defs[1].start()]
UpperCamelCase_ = 0
UpperCamelCase_ = [
pos for pos in [find_re(_lowercase , _lowercase , _lowercase ) for terminal in terminals] if pos != -1
]
if len(_lowercase ) > 0:
return completion[: min(_lowercase )]
else:
return completion
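# --- Added illustration (hedged, standalone): the "^def" truncation rule from
# `truncate` above; the completion text is made up.
import re
completion = "def first():\n    return 1\n\ndef second():\n    pass\n"
defs = list(re.finditer("^def", completion, re.MULTILINE))
if len(defs) > 1:
    completion = completion[: defs[1].start()]
print(completion)  # only the first generated function survives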
| 60 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( snake_case ):
def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , )-> Optional[Any]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=_lowercase , speech_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , feature_extractor=_lowercase , )
def UpperCAmelCase_ ( self , _lowercase = "auto" )-> str:
if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
def UpperCAmelCase_ ( self )-> Optional[int]:
        self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self , _lowercase , _lowercase=16_000 , _lowercase = 512 , _lowercase = 512 , _lowercase = 50 , _lowercase = 7.5 , _lowercase = None , _lowercase = 1 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , **_lowercase , )-> str:
UpperCamelCase_ = self.speech_processor.feature_extractor(
_lowercase , return_tensors="pt" , sampling_rate=_lowercase ).input_features.to(self.device )
UpperCamelCase_ = self.speech_model.generate(_lowercase , max_length=480_000 )
UpperCamelCase_ = self.speech_processor.tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , normalize=_lowercase )[
0
]
if isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = 1
elif isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = len(_lowercase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_lowercase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_lowercase )}." )
# get prompt text embeddings
UpperCamelCase_ = self.tokenizer(
_lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
UpperCamelCase_ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
UpperCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = text_embeddings.shape
UpperCamelCase_ = text_embeddings.repeat(1 , _lowercase , 1 )
UpperCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , _lowercase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
UpperCamelCase_ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
UpperCamelCase_ = 42
if negative_prompt is None:
UpperCamelCase_ = [""] * batch_size
elif type(_lowercase ) is not type(_lowercase ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(_lowercase )} !="
F" {type(_lowercase )}." )
elif isinstance(_lowercase , _lowercase ):
UpperCamelCase_ = [negative_prompt]
elif batch_size != len(_lowercase ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(_lowercase )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
UpperCamelCase_ = negative_prompt
UpperCamelCase_ = text_input_ids.shape[-1]
UpperCamelCase_ = self.tokenizer(
_lowercase , padding="max_length" , max_length=_lowercase , truncation=_lowercase , return_tensors="pt" , )
UpperCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase_ = uncond_embeddings.shape[1]
UpperCamelCase_ = uncond_embeddings.repeat(1 , _lowercase , 1 )
UpperCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowercase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
UpperCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
UpperCamelCase_ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device="cpu" , dtype=_lowercase ).to(
self.device )
else:
UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
UpperCamelCase_ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(num_inference_steps)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
timesteps_tensor = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
latents = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
extra_step_kwargs = {}
if accepts_eta:
extra_step_kwargs["eta"] = eta
for i, t in enumerate(self.progress_bar(timesteps_tensor)):
# expand the latents if we are doing classifier free guidance
latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
# predict the noise residual
noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
# perform guidance
if do_classifier_free_guidance:
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(i, t, latents)
# undo the VAE scaling factor (0.18215) applied at encoding time before decoding back to pixels
latents = 1 / 0.18_215 * latents
image = self.vae.decode(latents).sample
image = (image / 2 + 0.5).clamp(0, 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
image = image.cpu().permute(0, 2, 3, 1).float().numpy()
if output_type == "pil":
image = self.numpy_to_pil(image)
if not return_dict:
return image
# no safety checker runs in this fragment, so nothing is reported for nsfw_content_detected
return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
| 60 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
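# The fast tests below exercise StableDiffusionXLImgaImgPipeline end to end with deliberately tiny components so they run in seconds on CPU.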
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
pipeline_class = StableDiffusionXLImgaImgPipeline
params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def UpperCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
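# A deliberately miniature UNet; the 'text_time' addition embedding and the projection input dim are the SDXL-specific pieces this pipeline requires.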
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
_UpperCAmelCase = EulerDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=32 , )
_UpperCAmelCase = CLIPTextModel(UpperCAmelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=UpperCAmelCase )
_UpperCAmelCase = CLIPTextModelWithProjection(UpperCAmelCase )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=UpperCAmelCase )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'text_encoder_2': text_encoder_a,
'tokenizer_2': tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=0 ):
"""simple docstring"""
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
_UpperCAmelCase = image / 2 + 0.5
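# floats_tensor draws uniformly from [0, 1]; the shift maps it into [0.5, 1] so the dummy img2img input looks like a plausible image batch.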
if str(UpperCAmelCase ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(UpperCAmelCase )
else:
_UpperCAmelCase = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 5.0,
'output_type': 'numpy',
'strength': 0.75,
}
return inputs
def UpperCamelCase ( self ):
"""simple docstring"""
device = 'cpu' # ensure determinism for the device-dependent torch.Generator
components = self.get_dummy_components()
sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
sd_pipe = sd_pipe.to(device)
sd_pipe.set_progress_bar_config(disable=None)
inputs = self.get_dummy_inputs(device)
image = sd_pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def UpperCamelCase ( self ):
"""simple docstring"""
super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
def UpperCamelCase ( self ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def UpperCamelCase ( self ):
"""simple docstring"""
pass
def UpperCamelCase ( self ):
"""simple docstring"""
components = self.get_dummy_components()
sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
sd_pipe = sd_pipe.to(torch_device)
sd_pipe.set_progress_bar_config(disable=None)
# forward without prompt embeds
inputs = self.get_dummy_inputs(torch_device)
negative_prompt = 3 * ['this is a negative prompt']
inputs['negative_prompt'] = negative_prompt
inputs['prompt'] = 3 * [inputs['prompt']]
output = sd_pipe(**inputs)
image_slice_1 = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
inputs = self.get_dummy_inputs(torch_device)
negative_prompt = 3 * ['this is a negative prompt']
prompt = 3 * [inputs.pop('prompt')]
(
    prompt_embeds,
    negative_prompt_embeds,
    pooled_prompt_embeds,
    negative_pooled_prompt_embeds,
) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
output = sd_pipe(
    **inputs,
    prompt_embeds=prompt_embeds,
    negative_prompt_embeds=negative_prompt_embeds,
    pooled_prompt_embeds=pooled_prompt_embeds,
    negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
)
image_slice_2 = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
def tearDown(self):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def get_inputs(self, device, generator_device='cpu', dtype=torch.float32, seed=0):
generator = torch.Generator(device=generator_device).manual_seed(seed)
latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
inputs = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def UpperCamelCase ( self ):
"""simple docstring"""
pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base')
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
inputs = self.get_inputs(torch_device)
image = pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 39 |
def circle_sort(collection: list) -> list:
    """Sort a list in place with circle sort and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # odd-length range: compare the middle element with its right neighbour
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    # keep passing over the whole list until a pass makes no swaps
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 39 | 1 |
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
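# IPNDM is a multistep scheduler, so the tests below seed it with a fabricated history of past residuals (scheduler.ets) before stepping.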
class IPNDMSchedulerTest(SchedulerCommonTest):
"""simple docstring"""
scheduler_classes = (IPNDMScheduler,)
forward_default_kwargs = (('num_inference_steps', 50),)
def get_scheduler_config(self, **kwargs):
'''simple docstring'''
config = {'num_train_timesteps': 1000}
config.update(**kwargs)
return config
def check_over_configs(self, time_step=0, **config):
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop('num_inference_steps', None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
scheduler.ets = dummy_past_residuals[:]
if time_step is None:
time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals
new_scheduler.ets = dummy_past_residuals[:]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def snake_case__ ( self : str )-> int:
'''simple docstring'''
pass
def check_over_forward(self, time_step=0, **forward_kwargs):
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop('num_inference_steps', None)
sample = self.dummy_sample
residual = 0.1 * sample
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residuals (must be after setting timesteps)
scheduler.ets = dummy_past_residuals[:]
if time_step is None:
time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(tmpdirname)
new_scheduler = scheduler_class.from_pretrained(tmpdirname)
# copy over dummy past residuals
new_scheduler.set_timesteps(num_inference_steps)
# copy over dummy past residual (must be after setting timesteps)
new_scheduler.ets = dummy_past_residuals[:]
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def full_loop(self, **config):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps = 10
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
for i, t in enumerate(scheduler.timesteps):
residual = model(sample, t)
sample = scheduler.step(residual, t, sample).prev_sample
return sample
def test_step_shape(self):
'''simple docstring'''
kwargs = dict(self.forward_default_kwargs)
num_inference_steps = kwargs.pop('num_inference_steps', None)
for scheduler_class in self.scheduler_classes:
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config)
sample = self.dummy_sample
residual = 0.1 * sample
if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
scheduler.set_timesteps(num_inference_steps)
elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
scheduler.num_inference_steps = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
scheduler.ets = dummy_past_residuals[:]
time_step_0 = scheduler.timesteps[5]
time_step_1 = scheduler.timesteps[6]
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
self.assertEqual(output_0.shape, sample.shape)
self.assertEqual(output_0.shape, output_1.shape)
def test_timesteps(self):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=timesteps, time_step=None)
def test_inference_steps(self):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)
def test_full_loop_no_noise(self):
'''simple docstring'''
sample = self.full_loop()
result_mean = torch.mean(torch.abs(sample))
assert abs(result_mean.item() - 2540529) < 10
| 282 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
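# Lightweight seq2seq evaluation: generate hypotheses chunk by chunk, stream them to disk, then optionally score against references with ROUGE (summarization) or BLEU (translation).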
logger = getLogger(__name__)
DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task='summarization',
    prefix=None,
    **generate_kwargs,
) -> Dict:
    '''Generate outputs for `examples` and write one hypothesis per line to `out_file`.'''
    fout = Path(out_file).open('w', encoding='utf-8')
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f'Inferred tokenizer type: {tokenizer.__class__}')  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, 'prefix', '') or ''
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors='pt', truncation=True, padding='longest').to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + '\n')
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {'n_obs': n_obs, 'runtime': runtime, 'seconds_per_sample': round(runtime / n_obs, 4)}
def datetime_now() -> str:
    '''Timestamp string used as the default value for --info.'''
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
def run_generate(verbose=True) -> Dict:
    '''Parse CLI args, generate outputs, and optionally score them against references.'''
    parser = argparse.ArgumentParser()
    parser.add_argument('model_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('input_path', type=str, help='like cnn_dm/test.source')
    parser.add_argument('save_path', type=str, help='where to save summaries')
    parser.add_argument('--reference_path', type=str, required=False, help='like cnn_dm/test.target')
    parser.add_argument('--score_path', type=str, required=False, default='metrics.json', help='where to save metrics')
    parser.add_argument('--device', type=str, required=False, default=DEFAULT_DEVICE, help='cuda, cuda:1, cpu etc.')
    parser.add_argument(
        '--prefix', type=str, required=False, default=None, help='will be added to the begininng of src examples')
    parser.add_argument('--task', type=str, default='summarization', help='used for task_specific_params + metrics')
    parser.add_argument('--bs', type=int, default=8, required=False, help='batch size')
    parser.add_argument(
        '--n_obs', type=int, default=-1, required=False, help='How many observations. Defaults to all.')
    parser.add_argument('--fp16', action='store_true')
    parser.add_argument('--dump-args', action='store_true', help='print the custom hparams with the results')
    parser.add_argument(
        '--info',
        nargs='?',
        type=str,
        const=datetime_now(),
        help=(
            'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
            ' lang=en-ru. If no value is passed, the current datetime string will be used.'
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f'parsed the following generate kwargs: {parsed_args}')
    examples = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.')
    if args.device == 'cpu' and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('Can\'t mix --fp16 and --device cpu')
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if 'translation' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores['info'] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, 'w'))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 282 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
def setUp(self):
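# setUp materialises tiny DPR and BART tokenizer files on disk so a RagTokenizer can be built without any downloads.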
self.tmpdirname = tempfile.mkdtemp()
self.retrieval_vector_size = 8
# DPR tok
vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
dpr_tokenizer_path = os.path.join(self.tmpdirname, 'dpr_tokenizer')
os.makedirs(dpr_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
# BART tok
vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
self.special_tokens_map = {"""unk_token""": """<unk>"""}
bart_tokenizer_path = os.path.join(self.tmpdirname, 'bart_tokenizer')
os.makedirs(bart_tokenizer_path, exist_ok=True)
self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['vocab_file'])
self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write(json.dumps(vocab_tokens) + '\n')
with open(self.merges_file, 'w', encoding='utf-8') as fp:
fp.write('\n'.join(merges))
def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'dpr_tokenizer'))
def get_bart_tokenizer(self) -> BartTokenizer:
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'bart_tokenizer'))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def _lowercase (self : int) -> Optional[int]:
save_dir = os.path.join(self.tmpdirname, 'rag_tokenizer')
rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
rag_config.save_pretrained(save_dir)
rag_tokenizer.save_pretrained(save_dir)
new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
def _lowercase (self : Optional[int]) -> Any:
__snake_case : str = RagTokenizer.from_pretrained('facebook/rag-token-nq')
__snake_case : List[Any] = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
__snake_case : Any = tokenizer(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
@slow
def _lowercase (self : Optional[Any]) -> Dict:
__snake_case : List[str] = RagTokenizer.from_pretrained('facebook/rag-sequence-nq')
__snake_case : List[Any] = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
__snake_case : Any = tokenizer(SCREAMING_SNAKE_CASE_)
self.assertIsNotNone(SCREAMING_SNAKE_CASE_)
| 172 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
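# Backend-specific model classes are appended lazily below, only when the matching framework (torch or TF) is importable.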
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_transfo_xl'] = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_transfo_xl'] = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
a : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 147 | 0 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
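# The helpers below mirror accelerate.utils: model unwrapping, cross-process synchronisation, and small environment utilities.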
def is_compiled_module(module) -> bool:
    if is_torch_version('<', '2.0.0') or not hasattr(torch, '_dynamo'):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model, keep_fpaa_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    # peel off DDP / DataParallel / DeepSpeed shells until the bare model remains
    while isinstance(model, options):
        model = model.module
    if not keep_fpaa_wrapper:
        forward = getattr(model, 'forward')
        original_forward = model.__dict__.pop('_original_forward', None)
        if original_forward is not None:
            while hasattr(forward, '__wrapped__'):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model, '_converted_to_transformer_engine', False):
        convert_model(model, to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone():
    PartialState().wait_for_everyone()
def save(obj, f):
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj):
    if not hasattr(obj, '__qualname__') and not hasattr(obj, '__name__'):
        obj = getattr(obj, '__class__', obj)
    if hasattr(obj, '__qualname__'):
        return obj.__qualname__
    if hasattr(obj, '__name__'):
        return obj.__name__
    return str(obj)
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port: int = None) -> bool:
    if port is None:
        port = 29_500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(('localhost', port)) == 0
| 80 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
A_ : Union[str, Any] =logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 80 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
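# Integration check: the summed log-likelihood of a short target under google/mt5-small must match a reference score (the name mtf_score suggests it was computed with the original Mesh TensorFlow implementation).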
@slow
def test_small_integration_test(self) -> None:
    model = AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small', return_dict=True).to(torch_device)
    tokenizer = AutoTokenizer.from_pretrained('google/mt5-small')
    input_ids = tokenizer('Hello there', return_tensors='pt').input_ids
    labels = tokenizer('Hi I am', return_tensors='pt').input_ids
    loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
    mtf_score = -(labels.shape[-1] * loss.item())
    EXPECTED_SCORE = -84.9127
    self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 50 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
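# The helpers below build (new_name, old_name) rename pairs mapping original CvT checkpoint keys onto the transformers CvtForImageClassification state dict.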
def embeddings(idx):
lowerCamelCase__ : Optional[int] = embed = []
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight""",
F"""stage{idx}.patch_embed.proj.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias""",
F"""stage{idx}.patch_embed.proj.bias""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight""",
F"""stage{idx}.patch_embed.norm.weight""",
) )
embed.append(
(
F"""cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias""",
F"""stage{idx}.patch_embed.norm.bias""",
) )
return embed
def attention(idx, cnt):
attention_weights = []
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked""",
F"""stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_q.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_k.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj_v.bias""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight""",
F"""stage{idx}.blocks.{cnt}.attn.proj.weight""",
) )
attention_weights.append(
(
F"""cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias""",
F"""stage{idx}.blocks.{cnt}.attn.proj.bias""",
) )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias""", F"""stage{idx}.blocks.{cnt}.mlp.fc2.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight""", F"""stage{idx}.blocks.{cnt}.norm1.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias""", F"""stage{idx}.blocks.{cnt}.norm1.bias""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight""", F"""stage{idx}.blocks.{cnt}.norm2.weight""") )
attention_weights.append(
(F"""cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias""", F"""stage{idx}.blocks.{cnt}.norm2.bias""") )
return attention_weights
def cls_token(idx):
token = []
token.append((F"""cvt.encoder.stages.{idx}.cls_token""", 'stage2.cls_token') )
return token
def final():
head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
img_labels_file = 'imagenet-1k-id2label.json'
num_labels = 1000
repo_id = 'huggingface/label-files'
id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
config.depth = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
config.depth = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
else:
config.depth = [2, 2, 20]
config.num_heads = [3, 12, 16]
config.embed_dim = [192, 768, 1024]
model = CvtForImageClassification(config)
image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
image_processor.size['shortest_edge'] = image_size
original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))
huggingface_weights = OrderedDict()
list_of_state_dict = []
for idx in range(len(config.depth)):
if config.cls_token[idx]:
list_of_state_dict = list_of_state_dict + cls_token(idx)
list_of_state_dict = list_of_state_dict + embeddings(idx)
for cnt in range(config.depth[idx]):
list_of_state_dict = list_of_state_dict + attention(idx, cnt)
list_of_state_dict = list_of_state_dict + final()
for gg in list_of_state_dict:
print(gg)
for i in range(len(list_of_state_dict)):
huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(huggingface_weights)
model.save_pretrained(pytorch_dump_folder_path)
image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
_UpperCAmelCase : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--cvt_model""",
default="""cvt-w24""",
type=str,
help="""Name of the cvt model you'd like to convert.""",
)
parser.add_argument(
"""--image_size""",
default=3_84,
type=int,
help="""Input Image Size""",
)
parser.add_argument(
"""--cvt_file_name""",
default=R"""cvtmodels\CvT-w24-384x384-IN-22k.pth""",
type=str,
help="""Input Image Size""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 50 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
model_type = 'xlm-roberta'
def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
'''simple docstring'''
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
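# The ONNX export config below declares dynamic axes so exported graphs accept variable batch and sequence sizes.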
class XLMRobertaOnnxConfig(OnnxConfig):
@property
def A__ (self ):
'''simple docstring'''
if self.task == "multiple-choice":
dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] ) | 317 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None):
    size = size if size is not None else {"""shortest_edge""": 20}
    crop_size = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
def A__ (self ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
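# Each test rebuilds the image processor from this dict, making the tester the single source of truth for the processor settings.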
@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None
def A__ (self ):
'''simple docstring'''
self.image_processor_tester = MobileNetVaImageProcessingTester(self)
@property
def A__ (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def A__ (self ):
'''simple docstring'''
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, """do_resize"""))
self.assertTrue(hasattr(image_processing, """size"""))
self.assertTrue(hasattr(image_processing, """do_center_crop"""))
self.assertTrue(hasattr(image_processing, """crop_size"""))
def A__ (self ):
'''simple docstring'''
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"""shortest_edge""": 20})
self.assertEqual(image_processor.crop_size, {"""height""": 18, """width""": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
self.assertEqual(image_processor.size, {"""shortest_edge""": 42})
self.assertEqual(image_processor.crop_size, {"""height""": 84, """width""": 84})
def A__ (self ):
'''simple docstring'''
pass
def A__ (self ):
'''simple docstring'''
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def A__ (self ):
'''simple docstring'''
image_processing = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , ) | 317 | 1 |
import os
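# Project Euler-style maximum path sum: read a triangle of numbers from disk, then accumulate the best attainable sum row by row (simple dynamic programming).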
def solution() -> int:
    """Find the maximum total from top to bottom of the triangle in triangle.txt."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")
    with open(triangle_path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)
    # each cell keeps the best path sum reaching it: add the larger of its two parents
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
| 156 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase :Dict = pytest.mark.integration
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
__magic_name__ : Union[str, Any] = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=True , keep_in_memory=True )
__magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
__magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def __lowerCAmelCase ( self : Any ) -> str:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Tuple ) -> int:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__magic_name__ : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : int = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
__magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
__magic_name__ : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_A )
__magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
import faiss
__magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__magic_name__ : str = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Optional[int] = 1
__magic_name__ , __magic_name__ : str = index.search(_A )
self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1]
__magic_name__ , __magic_name__ : str = index.search_batch(_A )
self.assertRaises(_A , index.search_batch , queries[0] )
__magic_name__ : List[Any] = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _A )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
import faiss
__magic_name__ : str = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__magic_name__ : str = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_A ):
__magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
import faiss
__magic_name__ : Any = faiss.IndexFlat(5 )
__magic_name__ : Optional[Any] = FaissIndex(custom_index=_A )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : Dict ) -> Tuple:
import faiss
__magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
index.save(tmp_file.name )
__magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ : Dict = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Tuple = 1
__magic_name__ , __magic_name__ : Optional[Any] = index.search(_A )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs ( mockfs ):
    """simple docstring"""
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f'mock://{index_name}'
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> Dict:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : Any = Elasticsearch()
__magic_name__ : Union[str, Any] = {'acknowledged': True}
__magic_name__ : Tuple = ElasticSearchIndex(es_client=_A )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__magic_name__ : str = 'foo'
__magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__magic_name__ : str = 'foo'
__magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A )
__magic_name__ : Tuple = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A )
# batched queries with timeout
__magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Dict = index.search_batch(_A , request_timeout=30 )
__magic_name__ : Optional[int] = [scores[0] for scores in total_scores]
__magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A ) | 331 | 0 |
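# A condensed sketch of the FaissIndex round trip exercised in the tests above;
# it assumes faiss is installed and mirrors only the public datasets.search API.
import faiss
import numpy as np
from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))  # five one-hot vectors
query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)  # inner product ranks vector 1 first
assert indices[0] == 1 and scores[0] > 0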
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)
def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1)) | 228 |
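# A usage sketch: the full truth table of the two-input and_gate defined above,
# enumerated with itertools.product.
from itertools import product

for a, b in product((0, 1), repeat=2):
    print(f"and_gate({a}, {b}) = {and_gate(a, b)}")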
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows_1, cols_1 = coefficient_matrix.shape
    rows_2, cols_2 = constant_matrix.shape
    if rows_1 != cols_1:
        msg = F"""Coefficient matrix dimensions must be nxn but received {rows_1}x{cols_1}"""
        raise ValueError(msg)
    if cols_2 != 1:
        msg = F"""Constant matrix must be nx1 but received {rows_2}x{cols_2}"""
        raise ValueError(msg)
    if rows_1 != rows_2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            F"""received {rows_1}x{cols_1} and {rows_2}x{cols_2}"""
        )
        raise ValueError(msg)
    if len(init_val) != rows_1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            F"""matrix but received {len(init_val)} and {rows_1}"""
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''')
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''')
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod() | 228 | 1 |
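# A quick numeric sketch of jacobi_iteration_method on a strictly diagonally
# dominant 3x3 system; the values are illustrative and do not add behaviour.
import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
init_val = [0.5, -0.5, -0.5]
print(jacobi_iteration_method(coefficient, constant, init_val, iterations=3))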
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    model_type = '''encoder-decoder'''
    is_composition = True
    def __init__( self , **kwargs) -> None:
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('encoder')
        encoder_model_type = encoder_config.pop('model_type')
        decoder_config = kwargs.pop('decoder')
        decoder_model_type = decoder_config.pop('model_type')
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config: PretrainedConfig , decoder_config: PretrainedConfig , **kwargs) -> PretrainedConfig:
        logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config')
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs)
    def to_dict( self) -> dict:
        output = copy.deepcopy(self.__dict__)
        output['encoder'] = self.encoder.to_dict()
        output['decoder'] = self.decoder.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
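# A usage sketch for the config class above; the checkpoint names are stand-ins,
# and the flow simply exercises from_encoder_decoder_configs as defined here.
from transformers import AutoConfig

encoder_config = AutoConfig.from_pretrained("bert-base-uncased")
decoder_config = AutoConfig.from_pretrained("gpt2")
config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention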
| 137 |
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
a_ : Dict = 'hf-internal-testing/tiny-random-bert'
a_ : Tuple = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
a_ : Optional[int] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6'
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
SCREAMING_SNAKE_CASE = cached_file(a , a)
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(a))
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(a , a)))
with open(os.path.join(a , 'refs' , 'main')) as f:
SCREAMING_SNAKE_CASE = f.read()
self.assertEqual(a , os.path.join(a , 'snapshots' , a , a))
self.assertTrue(os.path.isfile(a))
# File is cached at the same place the second time.
SCREAMING_SNAKE_CASE = cached_file(a , a)
self.assertEqual(a , a)
# Using a specific revision to test the full commit hash.
SCREAMING_SNAKE_CASE = cached_file(a , a , revision='9b8c223')
self.assertEqual(a , os.path.join(a , 'snapshots' , a , a))
def SCREAMING_SNAKE_CASE__ ( self) -> List[str]:
with self.assertRaisesRegex(a , 'is not a valid model identifier'):
SCREAMING_SNAKE_CASE = cached_file('tiny-random-bert' , a)
with self.assertRaisesRegex(a , 'is not a valid git identifier'):
SCREAMING_SNAKE_CASE = cached_file(a , a , revision='aaaa')
with self.assertRaisesRegex(a , 'does not appear to have a file named'):
SCREAMING_SNAKE_CASE = cached_file(a , 'conf')
def SCREAMING_SNAKE_CASE__ ( self) -> List[Any]:
with self.assertRaisesRegex(a , 'does not appear to have a file named'):
SCREAMING_SNAKE_CASE = cached_file(a , 'conf')
with open(os.path.join(a , 'refs' , 'main')) as f:
SCREAMING_SNAKE_CASE = f.read()
self.assertTrue(os.path.isfile(os.path.join(a , '.no_exist' , a , 'conf')))
SCREAMING_SNAKE_CASE = cached_file(a , 'conf' , _raise_exceptions_for_missing_entries=a)
self.assertIsNone(a)
SCREAMING_SNAKE_CASE = cached_file(a , 'conf' , local_files_only=a , _raise_exceptions_for_missing_entries=a)
self.assertIsNone(a)
SCREAMING_SNAKE_CASE = mock.Mock()
SCREAMING_SNAKE_CASE = 500
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = HTTPError
SCREAMING_SNAKE_CASE = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=a) as mock_head:
SCREAMING_SNAKE_CASE = cached_file(a , 'conf' , _raise_exceptions_for_connection_errors=a)
self.assertIsNone(a)
# This check we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE__ ( self) -> int:
self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , a))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , a))
self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , a))
def SCREAMING_SNAKE_CASE__ ( self) -> Any:
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt'))
# The function raises if the repository does not exist.
with self.assertRaisesRegex(a , 'is not a valid model identifier'):
get_file_from_repo('bert-base-case' , a)
# The function raises if the revision does not exist.
with self.assertRaisesRegex(a , 'is not a valid git identifier'):
get_file_from_repo('bert-base-cased' , a , revision='ahaha')
SCREAMING_SNAKE_CASE = get_file_from_repo('bert-base-cased' , a)
# The name is the cached name which is not very easy to test, so instead we load the content.
SCREAMING_SNAKE_CASE = json.loads(open(a , 'r').read())
self.assertEqual(config['hidden_size'] , 768)
def SCREAMING_SNAKE_CASE__ ( self) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE = Path(a) / 'a.txt'
filename.touch()
self.assertEqual(get_file_from_repo(a , 'a.txt') , str(a))
self.assertIsNone(get_file_from_repo(a , 'b.txt'))
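# A small usage sketch of the helpers exercised above, against the same tiny
# test repo; the call signatures are the public ones from transformers.utils.
from transformers.utils import cached_file

config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
# A missing entry can yield None instead of raising:
missing = cached_file(
    "hf-internal-testing/tiny-random-bert",
    "does-not-exist.bin",
    _raise_exceptions_for_missing_entries=False,
)
assert missing is None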
| 137 | 1 |
'''simple docstring'''
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
def __init__( self : Dict , __a : int , __a : Any=13 , __a : Optional[int]=7 , __a : Optional[int]=True , __a : int=True , __a : Any=True , __a : List[str]=True , __a : Tuple=99 , __a : Optional[Any]=32 , __a : Optional[int]=5 , __a : List[Any]=4 , __a : str=37 , __a : str="gelu" , __a : List[str]=0.1 , __a : str=0.1 , __a : int=5_12 , __a : int=16 , __a : List[str]=2 , __a : List[str]=0.02 , __a : str=False , __a : List[str]=True , __a : int="None" , __a : List[str]=3 , __a : Tuple=4 , __a : Any=None , ):
_a = parent
_a = batch_size
_a = seq_length
_a = is_training
_a = use_input_mask
_a = use_token_type_ids
_a = use_labels
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = num_choices
_a = relative_attention
_a = position_biased_input
_a = pos_att_type
_a = scope
def UpperCamelCase__ ( self : Any ):
_a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a = None
if self.use_input_mask:
_a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_a = None
if self.use_token_type_ids:
_a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_a = None
_a = None
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_a = ids_tensor([self.batch_size] , self.num_choices )
_a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self : List[str] ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCamelCase__ ( self : Tuple , __a : int ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCamelCase__ ( self : Dict , __a : List[Any] , __a : List[Any] , __a : Tuple , __a : Any , __a : int , __a : Optional[int] , __a : Union[str, Any] ):
_a = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
_a = model(__a , attention_mask=__a , token_type_ids=__a )[0]
_a = model(__a , token_type_ids=__a )[0]
_a = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCamelCase__ ( self : Tuple , __a : str , __a : str , __a : Dict , __a : Dict , __a : Dict , __a : Tuple , __a : List[str] ):
_a = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
_a = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self : str , __a : Optional[int] , __a : Optional[Any] , __a : Dict , __a : List[Any] , __a : Union[str, Any] , __a : List[str] , __a : Dict ):
_a = self.num_labels
_a = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
_a = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def UpperCamelCase__ ( self : Dict , __a : Any , __a : str , __a : Any , __a : List[str] , __a : List[str] , __a : int , __a : List[str] ):
_a = self.num_labels
_a = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
_a = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self : Dict , __a : int , __a : Union[str, Any] , __a : Optional[int] , __a : List[Any] , __a : Any , __a : Union[str, Any] , __a : Tuple ):
_a = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
_a = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self : Dict , __a : Dict , __a : List[str] , __a : List[Any] , __a : Optional[Any] , __a : Optional[Any] , __a : Any , __a : List[str] ):
_a = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
_a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_a = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self : Tuple ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =(
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__a =(
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a =True
__a =False
__a =False
__a =False
__a =False
def UpperCamelCase__ ( self : int ):
_a = DebertaVaModelTester(self )
_a = ConfigTester(self , config_class=__a , hidden_size=37 )
def UpperCamelCase__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def UpperCamelCase__ ( self : List[str] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def UpperCamelCase__ ( self : str ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def UpperCamelCase__ ( self : int ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def UpperCamelCase__ ( self : Tuple ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def UpperCamelCase__ ( self : Dict ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="Model not available yet" )
def UpperCamelCase__ ( self : Optional[int] ):
pass
@slow
def UpperCamelCase__ ( self : str ):
_a = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge" )
_a = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_a = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
_a = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
| 370 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model ) -> int:
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir , metric ) -> ModelCheckpoint:
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F'val_{metric}' , mode="max" , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback ( metric , patience ) -> EarlyStopping:
    return EarlyStopping(
        monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback (pl.Callback ):
    """simple docstring"""
    def on_batch_end ( self , trainer , pl_module ):
        lrs = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs ( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ):
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
    @rank_zero_only
    def on_train_start ( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
    @rank_zero_only
    def on_test_end ( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
    @rank_zero_only
    def on_validation_end ( self , trainer: pl.Trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 346 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_lowerCAmelCase : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class __magic_name__ ( datasets.BuilderConfig ):
"""simple docstring"""
__UpperCamelCase = None
__UpperCamelCase = "utf-8"
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = True # deprecated
__UpperCamelCase = None # deprecated
__UpperCamelCase = 10 << 20 # 10MB
__UpperCamelCase = None
class __magic_name__ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__UpperCamelCase = JsonConfig
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
if self.config.block_size is not None:
logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead" )
A_ : List[str] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
"The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore." )
if self.config.newlines_in_values is not None:
raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported" )
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Union[str, Any] ):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
A_ : Dict = dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case , (str, list, tuple) ):
A_ : int = data_files
if isinstance(snake_case , snake_case ):
A_ : List[Any] = [files]
A_ : int = [dl_manager.iter_files(snake_case ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
A_ : List[str] = []
for split_name, files in data_files.items():
if isinstance(snake_case , snake_case ):
A_ : Optional[int] = [files]
A_ : Dict = [dl_manager.iter_files(snake_case ) for file in files]
splits.append(datasets.SplitGenerator(name=snake_case , gen_kwargs={"files": files} ) )
return splits
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] , snake_case :pa.Table ):
'''simple docstring'''
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
A_ : Optional[Any] = self.config.features.arrow_schema.field(snake_case ).type
A_ : int = pa_table.append_column(snake_case , pa.array([None] * len(snake_case ) , type=snake_case ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
A_ : Union[str, Any] = table_cast(snake_case , self.config.features.arrow_schema )
return pa_table
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , snake_case :Optional[int] ):
'''simple docstring'''
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : Optional[int] = json.load(snake_case )
# We keep only the field we are interested in
A_ : Tuple = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(snake_case , (list, tuple) ):
A_ : List[Any] = set().union(*[row.keys() for row in dataset] )
A_ : List[Any] = {col: [row.get(snake_case ) for row in dataset] for col in keys}
else:
A_ : Optional[Any] = dataset
A_ : List[Any] = pa.Table.from_pydict(snake_case )
yield file_idx, self._cast_table(snake_case )
# If the file has one json object per line
else:
with open(snake_case , "rb" ) as f:
A_ : List[Any] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
A_ : int = max(self.config.chunksize // 32 , 16 << 10 )
A_ : List[Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
)
while True:
A_ : str = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(snake_case )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
A_ : Any = batch.decode(self.config.encoding , errors=snake_case ).encode("utf-8" )
try:
while True:
try:
A_ : int = paj.read_json(
io.BytesIO(snake_case ) , read_options=paj.ReadOptions(block_size=snake_case ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(snake_case , pa.ArrowInvalid )
and "straddling" not in str(snake_case )
or block_size > len(snake_case )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f"Batch of {len(snake_case )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}." )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
snake_case , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
A_ : List[Any] = json.load(snake_case )
except json.JSONDecodeError:
logger.error(f"Failed to read file '{file}' with error {type(snake_case )}: {e}" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(snake_case , snake_case ): # list is the only sequence type supported in JSON
try:
A_ : List[str] = set().union(*[row.keys() for row in dataset] )
A_ : Dict = {col: [row.get(snake_case ) for row in dataset] for col in keys}
A_ : Tuple = pa.Table.from_pydict(snake_case )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f"Failed to read file '{file}' with error {type(snake_case )}: {e}" )
raise ValueError(f"Not able to read records in the JSON file at {file}." ) from None
yield file_idx, self._cast_table(snake_case )
break
else:
logger.error(f"Failed to read file '{file}' with error {type(snake_case )}: {e}" )
raise ValueError(
f"Not able to read records in the JSON file at {file}. "
f"You should probably indicate the field of the JSON file containing your records. "
f"This JSON file contain the following fields: {str(list(dataset.keys() ) )}. "
f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. " ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(snake_case )
batch_idx += 1
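# A usage sketch of the `field` handling implemented above, through the public
# load_dataset entry point; "data.json" is a stand-in path whose top level
# looks like {"version": ..., "data": [...]}.
from datasets import load_dataset

ds = load_dataset("json", data_files="data.json", field="data", split="train")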
| 300 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowerCAmelCase : int = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_lowerCAmelCase : Tuple = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_lowerCAmelCase : int = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
    def _compute ( self , predictions :List[List[List[str]]] , references :List[List[str]] , min_len :int = 1 , max_len :int = 4 , ):
        '''simple docstring'''
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 300 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A ( unittest.TestCase ):
@slow
def _A (self ):
        model = AutoModelForSeq2SeqLM.from_pretrained('google/mt5-small' , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained('google/mt5-small' )
        input_ids = tokenizer('Hello there' , return_tensors='pt' ).input_ids
        labels = tokenizer('Hi I am' , return_tensors='pt' ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
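# Why the score is -(labels.shape[-1] * loss): the returned cross-entropy is
# the mean negative log-likelihood per label token, so scaling by the number
# of label tokens and negating recovers the summed log-likelihood that the
# -84.9127 reference was computed with. Illustrative arithmetic only:
num_label_tokens = 7      # stand-in for labels.shape[-1]
mean_nll = 12.1304        # stand-in for loss.item()
score = -(num_label_tokens * mean_nll)  # ~ -84.91, a summed log-likelihood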
| 304 |
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=2 , lowerCAmelCase=9_9 , lowerCAmelCase=0 , lowerCAmelCase=3_2 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase="last" , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=0 , ):
__lowercase= parent
__lowercase= batch_size
__lowercase= seq_length
__lowercase= is_training
__lowercase= use_input_lengths
__lowercase= use_token_type_ids
__lowercase= use_labels
__lowercase= gelu_activation
__lowercase= sinusoidal_embeddings
__lowercase= causal
__lowercase= asm
__lowercase= n_langs
__lowercase= vocab_size
__lowercase= n_special
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_attention_heads
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_sequence_label_size
__lowercase= initializer_range
__lowercase= num_labels
__lowercase= num_choices
__lowercase= summary_type
__lowercase= use_proj
__lowercase= scope
__lowercase= bos_token_id
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_input_lengths:
__lowercase= (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , 2 ).float()
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _A (self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , lengths=lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase , langs=lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMWithLMHeadModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnsweringSimple(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
__lowercase= outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , p_mask=lowerCAmelCase , )
__lowercase= model(
lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase , cls_index=lowerCAmelCase , is_impossible=lowerCAmelCase , )
((__lowercase), )= result_with_labels.to_tuple()
__lowercase= model(lowerCAmelCase , start_positions=lowerCAmelCase , end_positions=lowerCAmelCase )
((__lowercase), )= result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= XLMForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_labels
__lowercase= XLMForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
__lowercase= self.num_choices
__lowercase= XLMForMultipleChoice(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
__lowercase= input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowercase= model(
lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class A ( A_ , A_ , A_ , unittest.TestCase ):
UpperCamelCase_ : int =(
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict =(
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCamelCase_ : str =(
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
__lowercase= super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
__lowercase= torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase )
return inputs_dict
def _A (self ):
__lowercase= XLMModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , emb_dim=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCAmelCase )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_attentions in attentions] , [True] * len(lowerCAmelCase ) )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= min_length + idx + 1
__lowercase= (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCAmelCase ) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=1 ):
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(
[isinstance(lowerCAmelCase , lowerCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCAmelCase ) , )
self.assertEqual(len(lowerCAmelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCAmelCase ):
# adds PAD dummy token
__lowercase= min_length + idx + 1
__lowercase= (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCAmelCase ) , )
pass
@slow
def _A (self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase= XLMModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@require_torch
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(lowerCAmelCase )
__lowercase= torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=lowerCAmelCase ) # the president
__lowercase= [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__lowercase= model.generate(lowerCAmelCase , do_sample=lowerCAmelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCAmelCase )
| 304 | 1 |
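A minimal sketch of the greedy-decoding check the integration test above performs. The checkpoint id and prompt token ids come from the test itself; everything else is illustrative, not part of the dataset row.

import torch
from transformers import XLMWithLMHeadModel

model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048").eval()
input_ids = torch.tensor([[14, 447]], dtype=torch.long)  # "the president"
# Greedy decoding (do_sample=False); the test expects the prompt to repeat.
output_ids = model.generate(input_ids, do_sample=False)
print(output_ids[0].tolist())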
"""simple docstring"""
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
lowercase__ : List[str] = logging.get_logger(__name__)
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase_ : str = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase ), f"{len(__lowerCAmelCase )} != {len(__lowerCAmelCase )}"
dest_layers.load_state_dict(layers_to_copy.state_dict() )
lowercase__ : str = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
lowercase__ : Dict = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def UpperCamelCase_ ( lowerCAmelCase__ : str , lowerCAmelCase__ : Any ) -> int:
"""simple docstring"""
try:
lowerCAmelCase_ : Any = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
f" {n_student}" )
return list(range(__lowerCAmelCase ) )
def UpperCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ) -> List[str]:
"""simple docstring"""
if n_student > n_teacher:
raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}" )
elif n_teacher == n_student:
return list(range(__lowerCAmelCase ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def UpperCamelCase_ ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : str = "student" , lowerCAmelCase__ : Dict = None , lowerCAmelCase__ : Any = None , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : str=None , lowerCAmelCase__ : int=None , **lowerCAmelCase__ : List[str] , ) -> int:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
AutoTokenizer.from_pretrained(__lowerCAmelCase ).save_pretrained(__lowerCAmelCase ) # purely for convenience
lowerCAmelCase_ : Tuple = AutoModelForSeq2SeqLM.from_pretrained(__lowerCAmelCase ).eval()
else:
assert isinstance(__lowerCAmelCase , __lowerCAmelCase ), f"teacher must be a model or string got type {type(__lowerCAmelCase )}"
lowerCAmelCase_ : Optional[int] = teacher.config.to_diff_dict()
try:
teacher_e , teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
lowerCAmelCase_ : Union[str, Any] = teacher_e
if d is None:
lowerCAmelCase_ : List[str] = teacher_d
init_kwargs.update({'encoder_layers': e, 'decoder_layers': d} )
except AttributeError: # T5
if hasattr(teacher.config , 'num_encoder_layers' ):
teacher_e , teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
teacher_e , teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
lowerCAmelCase_ : Optional[int] = teacher_e
if d is None:
lowerCAmelCase_ : Optional[Any] = teacher_d
if hasattr(teacher.config , 'num_encoder_layers' ):
init_kwargs.update({'num_encoder_layers': e, 'num_decoder_layers': d} )
else:
init_kwargs.update({'num_layers': e, 'num_decoder_layers': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__lowerCAmelCase )
# Copy weights
lowerCAmelCase_ : List[str] = teacher.config_class(**__lowerCAmelCase )
lowerCAmelCase_ : Dict = AutoModelForSeq2SeqLM.from_config(__lowerCAmelCase )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
lowerCAmelCase_ : Dict = student.load_state_dict(teacher.state_dict() , strict=__lowerCAmelCase )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
e_layers_to_copy , d_layers_to_copy = list(range(__lowerCAmelCase ) ), list(range(__lowerCAmelCase ) )
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
f" {save_path}" )
student.save_pretrained(__lowerCAmelCase )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
lowerCAmelCase_ : List[int] = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
if d_layers_to_copy is None:
lowerCAmelCase_ : List[int] = pick_layers_to_copy(__lowerCAmelCase , __lowerCAmelCase )
try:
if hasattr(
__lowerCAmelCase , 'prophetnet' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __lowerCAmelCase )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __lowerCAmelCase )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __lowerCAmelCase )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __lowerCAmelCase )
copy_layers(teacher.decoder.block , student.decoder.block , __lowerCAmelCase )
logger.info(
f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}" )
lowerCAmelCase_ : Dict = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(__lowerCAmelCase )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 224 |
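A self-contained sketch of the layer-copying idea in the script above: pick a subset of teacher layers, then load their weights into the student's smaller layer stack. Module sizes and the copied indices are illustrative; the index list mirrors LAYERS_TO_COPY[12][3].

import torch
from torch import nn

teacher_layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(12)])
student_layers = nn.ModuleList([nn.Linear(8, 8) for _ in range(3)])
layers_to_copy = [0, 6, 11]  # keep first and last, spread the middle
picked = nn.ModuleList([teacher_layers[i] for i in layers_to_copy])
student_layers.load_state_dict(picked.state_dict())
assert torch.equal(student_layers[2].weight, teacher_layers[11].weight)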
'''simple docstring'''
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase__ = logging.getLogger()
lowerCamelCase__ = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[int] ) ->Tuple:
'''simple docstring'''
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = {"source": "What is love ?", "target": "life"}
_UpperCAmelCase : Any = {"train": 12, "val": 2, "test": 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_UpperCAmelCase : Dict = "\n".join([contents[field]] * n_lines[split] )
with open(os.path.join(lowerCamelCase__ , F"""{split}.{field}""" ) , "w" ) as f:
f.write(lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : int , lowerCamelCase__ : str = "pytorch" ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = self.get_auto_remove_tmp_dir()
_UpperCAmelCase : int = os.path.join(lowerCamelCase__ , "output" )
_UpperCAmelCase : Tuple = os.path.join(lowerCamelCase__ , "data" )
self._create_dummy_data(data_dir=lowerCamelCase__ )
_UpperCAmelCase : str = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append("--fp16" )
else:
testargs.append("--gpus=0" )
testargs.append("--distributed_backend=ddp_cpu" )
testargs.append("--num_processes=2" )
_UpperCAmelCase : str = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(lowerCamelCase__ , env=self.get_env() )
_UpperCAmelCase : Optional[int] = os.path.join(lowerCamelCase__ , "metrics.json" )
with open(lowerCamelCase__ ) as f:
_UpperCAmelCase : Dict = json.load(lowerCamelCase__ )
return result
@require_torch_gpu
def lowerCAmelCase__ ( self : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_gpu
@require_ray
def lowerCAmelCase__ ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase : Any = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
@require_torch_multi_gpu
@require_ray
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self._run_finetune(gpus=1 , distributed_retriever="ray" )
self.assertGreaterEqual(result["test"][0]["test_avg_em"] , 0.2 )
| 234 | 0 |
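A minimal sketch of the dummy seq2seq dataset layout the test above builds: parallel {split}.source / {split}.target files, one example per line. The directory path is illustrative.

import os

data_dir = "/tmp/dummy_seq2seq"
os.makedirs(data_dir, exist_ok=True)
contents = {"source": "What is love ?", "target": "life"}
n_lines = {"train": 12, "val": 2, "test": 2}
for split, n in n_lines.items():
    for field in ("source", "target"):
        with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
            f.write("\n".join([contents[field]] * n))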
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCAmelCase__ (lowerCAmelCase_ = 3 ):
'''simple docstring'''
if not isinstance(lowerCAmelCase_ , int ):
raise TypeError("number of qubits must be an integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(lowerCAmelCase_ ) != number_of_qubits:
raise ValueError("number of qubits must be exact integer." )
if number_of_qubits > 10:
raise ValueError("number of qubits too large to simulate(>10)." )
__SCREAMING_SNAKE_CASE = QuantumRegister(lowerCAmelCase_ , "qr" )
__SCREAMING_SNAKE_CASE = ClassicalRegister(lowerCAmelCase_ , "cr" )
__SCREAMING_SNAKE_CASE = QuantumCircuit(lowerCAmelCase_ , lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = number_of_qubits
for i in range(lowerCAmelCase_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(lowerCAmelCase_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowerCAmelCase_ , lowerCAmelCase_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(lowerCAmelCase_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(lowerCAmelCase_ , lowerCAmelCase_ )
# simulate with 10000 shots
__SCREAMING_SNAKE_CASE = Aer.get_backend("qasm_simulator" )
__SCREAMING_SNAKE_CASE = execute(lowerCAmelCase_ , lowerCAmelCase_ , shots=1_0000 )
return job.result().get_counts(lowerCAmelCase_ )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 195 |
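As a sanity check on what the circuit above implements: the n-qubit QFT is the unitary F[j, k] = exp(2*pi*i*j*k / 2**n) / sqrt(2**n). A small numpy sketch (standalone, no qiskit needed):

import numpy as np

n = 3
dim = 2 ** n
j, k = np.meshgrid(np.arange(dim), np.arange(dim), indexing="ij")
qft = np.exp(2j * np.pi * j * k / dim) / np.sqrt(dim)
assert np.allclose(qft @ qft.conj().T, np.eye(dim))  # unitarity check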
"""simple docstring"""
a__ : Optional[Any] = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = input("Enter message: " )
__SCREAMING_SNAKE_CASE = input("Enter key [alphanumeric]: " )
__SCREAMING_SNAKE_CASE = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
__SCREAMING_SNAKE_CASE = "encrypt"
__SCREAMING_SNAKE_CASE = encrypt_message(lowerCAmelCase_ , lowerCAmelCase_ )
elif mode.lower().startswith("d" ):
__SCREAMING_SNAKE_CASE = "decrypt"
__SCREAMING_SNAKE_CASE = decrypt_message(lowerCAmelCase_ , lowerCAmelCase_ )
print(f"""\n{mode.title()}ed message:""" )
print(lowerCAmelCase_ )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "encrypt" )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return translate_message(lowerCAmelCase_ , lowerCAmelCase_ , "decrypt" )
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = key.upper()
for symbol in message:
__SCREAMING_SNAKE_CASE = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(lowerCAmelCase_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = 0
else:
translated.append(lowerCAmelCase_ )
return "".join(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 195 | 1 |
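A self-contained round-trip sketch of the same Vigenere scheme as above: the key index advances only on letters, case is preserved, and non-letters pass through unchanged.

LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

def shift(message: str, key: str, sign: int) -> str:
    out, ki = [], 0
    for ch in message:
        idx = LETTERS.find(ch.upper())
        if idx == -1:
            out.append(ch)  # non-letters pass through; key does not advance
            continue
        idx = (idx + sign * LETTERS.find(key[ki].upper())) % len(LETTERS)
        out.append(LETTERS[idx] if ch.isupper() else LETTERS[idx].lower())
        ki = (ki + 1) % len(key)
    return "".join(out)

ct = shift("Attack at dawn", "LEMON", +1)   # encrypt
assert shift(ct, "LEMON", -1) == "Attack at dawn"  # decrypt round trip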
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_A = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_A = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_A = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 
'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCAmelCase ( datasets.Metric ):
def __a ( self ) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , )
def __a ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 1 , _UpperCamelCase = 4 , ) -> Dict[str, float]:
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=_UpperCamelCase , hypotheses=_UpperCamelCase , min_len=_UpperCamelCase , max_len=_UpperCamelCase )
}
| 231 |
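The metric class above is a thin wrapper over nltk; calling the underlying scorer directly looks like this (toy tokens, illustrative only):

from nltk.translate import gleu_score

hypotheses = [["he", "read", "the", "book"]]
# One list of references per hypothesis; each reference is a token list.
list_of_references = [[["he", "read", "the", "book", "today"]]]
score = gleu_score.corpus_gleu(
    list_of_references=list_of_references, hypotheses=hypotheses, min_len=1, max_len=4
)
print(round(score, 2))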
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class _lowerCAmelCase ( pl.LightningModule ):
def __init__( self , _UpperCamelCase ) -> List[str]:
super().__init__()
lowerCAmelCase_ = model
lowerCAmelCase_ = 2
lowerCAmelCase_ = nn.Linear(self.model.config.hidden_size , self.num_labels )
def __a ( self ) -> Tuple:
pass
def lowerCamelCase__ ( __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : str ):
"""simple docstring"""
lowerCAmelCase_ = LongformerModel.from_pretrained(__lowerCAmelCase )
lowerCAmelCase_ = LightningModel(__lowerCAmelCase )
lowerCAmelCase_ = torch.load(__lowerCAmelCase , map_location=torch.device("cpu" ) )
lightning_model.load_state_dict(ckpt["state_dict"] )
# init longformer question answering model
lowerCAmelCase_ = LongformerForQuestionAnswering.from_pretrained(__lowerCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__lowerCAmelCase )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_A = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 231 | 1 |
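A minimal sketch of the checkpoint-transfer pattern the conversion script uses: load the Lightning checkpoint's state_dict into a wrapper, then copy the matching submodule into the standalone model. Modules and the temp path are toy stand-ins.

import torch
from torch import nn

wrapper = nn.ModuleDict({"qa_outputs": nn.Linear(4, 2)})
torch.save({"state_dict": wrapper.state_dict()}, "/tmp/ckpt.pt")

target = nn.Linear(4, 2)
ckpt = torch.load("/tmp/ckpt.pt", map_location=torch.device("cpu"))
wrapper.load_state_dict(ckpt["state_dict"])
target.load_state_dict(wrapper["qa_outputs"].state_dict())
assert torch.equal(target.weight, wrapper["qa_outputs"].weight)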
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__UpperCAmelCase :Any = TypeVar("T")
__UpperCAmelCase :str = TypeVar("U")
class a ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : Optional[int] , snake_case : T | None , snake_case : U | None ) -> Tuple:
__UpperCAmelCase : str = key
__UpperCAmelCase : Optional[Any] = val
__UpperCAmelCase : DoubleLinkedListNode[T, U] | None = None
__UpperCAmelCase : DoubleLinkedListNode[T, U] | None = None
def __repr__( self : int ) -> str:
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class a ( Generic[T, U] ):
"""simple docstring"""
def __init__( self : Optional[int] ) -> None:
__UpperCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case , snake_case )
__UpperCAmelCase : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(snake_case , snake_case )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = self.rear, self.head
def __repr__( self : Tuple ) -> str:
__UpperCAmelCase : Optional[int] = ['''DoubleLinkedList''']
__UpperCAmelCase : List[str] = self.head
while node.next is not None:
rep.append(str(snake_case ) )
__UpperCAmelCase : Optional[int] = node.next
rep.append(str(self.rear ) )
return ",\n ".join(snake_case )
def lowerCamelCase__ ( self : Any , snake_case : DoubleLinkedListNode[T, U] ) -> None:
__UpperCAmelCase : List[Any] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
__UpperCAmelCase : Tuple = node
__UpperCAmelCase : List[str] = previous
__UpperCAmelCase : Union[str, Any] = node
__UpperCAmelCase : List[str] = self.rear
def lowerCamelCase__ ( self : Tuple , snake_case : DoubleLinkedListNode[T, U] ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
__UpperCAmelCase : List[Any] = node.next
__UpperCAmelCase : Optional[Any] = node.prev
__UpperCAmelCase : List[Any] = None
__UpperCAmelCase : Any = None
return node
class a ( Generic[T, U] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : List[str] , snake_case : int ) -> List[str]:
__UpperCAmelCase : DoubleLinkedList[T, U] = DoubleLinkedList()
__UpperCAmelCase : int = capacity
__UpperCAmelCase : int = 0
__UpperCAmelCase : Any = 0
__UpperCAmelCase : Optional[int] = 0
__UpperCAmelCase : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self : List[str] ) -> str:
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : Union[str, Any] , snake_case : T ) -> bool:
return key in self.cache
def lowerCamelCase__ ( self : Optional[int] , snake_case : T ) -> U | None:
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
__UpperCAmelCase : DoubleLinkedListNode[T, U] = self.cache[key]
__UpperCAmelCase : List[str] = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(snake_case )
return node.val
self.miss += 1
return None
def lowerCamelCase__ ( self : Optional[int] , snake_case : T , snake_case : U ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
__UpperCAmelCase : List[str] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(snake_case ) is not None
) # node guaranteed to be in list assert node.key is not None
del self.cache[first_node.key]
self.num_keys -= 1
__UpperCAmelCase : Optional[Any] = DoubleLinkedListNode(snake_case , snake_case )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
__UpperCAmelCase : Any = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
__UpperCAmelCase : Dict = value
self.list.add(snake_case )
@classmethod
def lowerCamelCase__ ( cls : Dict , snake_case : int = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(snake_case : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*snake_case : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
__UpperCAmelCase : Optional[int] = LRUCache(snake_case )
__UpperCAmelCase : str = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
__UpperCAmelCase : int = func(*snake_case )
cls.decorator_function_to_instance_map[func].put(args[0] , snake_case )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(snake_case , '''cache_info''' , snake_case ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 240 |
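A usage sketch for the decorator interface of the LRU cache above. Note it memoizes on the first positional argument only (put(args[0], ...)), and in the upstream version cache_info is attached to the wrapper via setattr.

@LRUCache.decorator(100)
def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

print(fib(30))           # 832040, computed with memoized recursion
print(fib.cache_info())  # hits / misses / capacity / current size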
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
__UpperCAmelCase :Dict = logging.get_logger(__name__)
__UpperCAmelCase :str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase :Optional[int] = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase :Union[str, Any] = {
"bert-base-uncased": 5_1_2,
"bert-large-uncased": 5_1_2,
"bert-base-cased": 5_1_2,
"bert-large-cased": 5_1_2,
"bert-base-multilingual-uncased": 5_1_2,
"bert-base-multilingual-cased": 5_1_2,
"bert-base-chinese": 5_1_2,
"bert-base-german-cased": 5_1_2,
"bert-large-uncased-whole-word-masking": 5_1_2,
"bert-large-cased-whole-word-masking": 5_1_2,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-base-cased-finetuned-mrpc": 5_1_2,
"bert-base-german-dbmdz-cased": 5_1_2,
"bert-base-german-dbmdz-uncased": 5_1_2,
"TurkuNLP/bert-base-finnish-cased-v1": 5_1_2,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_1_2,
"wietsedv/bert-base-dutch-cased": 5_1_2,
}
__UpperCAmelCase :str = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Optional[Any] = BertTokenizer
def __init__( self : Optional[int] , snake_case : Union[str, Any]=None , snake_case : str=None , snake_case : Any=True , snake_case : Tuple="[UNK]" , snake_case : int="[SEP]" , snake_case : Optional[Any]="[PAD]" , snake_case : int="[CLS]" , snake_case : Optional[Any]="[MASK]" , snake_case : Union[str, Any]=True , snake_case : List[Any]=None , **snake_case : int , ) -> str:
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
__UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , snake_case ) != tokenize_chinese_chars
):
__UpperCAmelCase : List[Any] = getattr(snake_case , normalizer_state.pop('''type''' ) )
__UpperCAmelCase : List[Any] = do_lower_case
__UpperCAmelCase : List[Any] = strip_accents
__UpperCAmelCase : str = tokenize_chinese_chars
__UpperCAmelCase : List[str] = normalizer_class(**snake_case )
__UpperCAmelCase : Optional[int] = do_lower_case
def lowerCamelCase__ ( self : List[Any] , snake_case : Tuple , snake_case : List[str]=None ) -> str:
__UpperCAmelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : List[str] = [self.sep_token_id]
__UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : List[Any] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase : Optional[int] = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
| 240 | 1 |
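A quick check of the special-token layout the fast tokenizer above produces: [CLS] A [SEP] B [SEP], with segment ids 0 for the first sequence and 1 for the second.

from transformers import BertTokenizerFast

tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
enc = tok("hello world", "second segment")
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])  # 0s through the first [SEP], 1s after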
_snake_case = [
(1000, "M"),
(900, "CM"),
(500, "D"),
(400, "CD"),
(100, "C"),
(90, "XC"),
(50, "L"),
(40, "XL"),
(10, "X"),
(9, "IX"),
(5, "V"),
(4, "IV"),
(1, "I"),
]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Optional[int] = 0
while place < len(_lowerCamelCase ):
if (place + 1 < len(_lowerCamelCase )) and (vals[roman[place]] < vals[roman[place + 1]]):
total += vals[roman[place + 1]] - vals[roman[place]]
place += 2
else:
total += vals[roman[place]]
place += 1
return total
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = []
for arabic, roman in ROMAN:
((_lowerCAmelCase) , (_lowerCAmelCase)) : List[str] = divmod(_lowerCamelCase , _lowerCamelCase )
result.append(roman * factor )
if number == 0:
break
return "".join(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
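A self-contained sketch of the subtractive rule the converter above relies on: when a smaller value precedes a larger one (e.g. "CM"), subtract it instead of adding.

VALS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def roman_to_int(roman: str) -> int:
    total = 0
    for cur, nxt in zip(roman, roman[1:] + " "):
        step = VALS[cur]
        total += -step if nxt in VALS and VALS[nxt] > step else step
    return total

assert roman_to_int("MCMXCIV") == 1994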
| 36 |
import math
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
return math.sqrt(__A ) * math.sqrt(__A ) == num
def lowerCAmelCase_ ( __A ) -> bool:
'''simple docstring'''
UpperCAmelCase__ = 0
UpperCAmelCase__ = n
while left <= right:
UpperCAmelCase__ = (left + right) // 2
if mid**2 == n:
return True
elif mid**2 > n:
UpperCAmelCase__ = mid - 1
else:
UpperCAmelCase__ = mid + 1
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 0 |
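Why the binary-search variant above is the safer check for large integers: the float-based test can misreport once sqrt results lose precision, while exact integer arithmetic cannot. A short demo (math.isqrt needs Python 3.8+):

import math

n = (10**8 + 1) ** 2
print(math.sqrt(n) * math.sqrt(n) == n)  # False on IEEE doubles: rounding
print(math.isqrt(n) ** 2 == n)           # True: exact integer arithmetic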
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 366 |
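The module above defers heavy imports until attribute access. The same idea in miniature, using PEP 562 module-level __getattr__ (a standalone sketch; transformers' _LazyModule layers dependency checks on top, and the attribute-to-module map here is illustrative):

import importlib

_LAZY_ATTRS = {"XGLMConfig": ".configuration_xglm", "XGLMModel": ".modeling_xglm"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        # Import the submodule only when the attribute is first requested.
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")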
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
__snake_case : int = TransfoXLTokenizer
__snake_case : Tuple = False
__snake_case : List[Any] = False
def UpperCamelCase ( self: int ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
_SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCamelCase ( self: Any , **UpperCAmelCase_: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """<unk> UNwanted , running"""
_SCREAMING_SNAKE_CASE = """<unk> unwanted, running"""
return input_text, output_text
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.tokenize("""<unk> UNwanted , running""" )
self.assertListEqual(UpperCAmelCase_ , ["""<unk>""", """unwanted""", """,""", """running"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [0, 4, 8, 7] )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer(lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer(lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TransfoXLTokenizer(lower_case=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
_SCREAMING_SNAKE_CASE = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
self.assertListEqual(tokenizer.tokenize(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(tokenizer.convert_tokens_to_string(UpperCAmelCase_ ) , UpperCAmelCase_ )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.get_tokenizer()
_SCREAMING_SNAKE_CASE = len(UpperCAmelCase_ )
tokenizer.add_tokens(["""new1""", """new2"""] )
tokenizer.move_added_token("""new1""" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(UpperCAmelCase_ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 125 | 0 |
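A usage sketch for the conventions the test above exercises: the wikitext-103 checkpoint tokenizes with @,@ / @.@ / @-@ placeholders inside numbers and hyphenated words.

from transformers import TransfoXLTokenizer

tok = TransfoXLTokenizer.from_pretrained("transfo-xl-wt103")
print(tok.tokenize("Henry's $5,000 with 3.34 m."))
# expected to include the '@,@' and '@.@' split points seen in the test vectors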
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = iter(lowerCamelCase)
while True:
__lowerCAmelCase = tuple(itertools.islice(lowerCamelCase, lowerCamelCase))
if not chunk:
return
yield chunk
def __magic_name__( lowerCamelCase):
__lowerCAmelCase = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters])
__lowerCAmelCase = ''''''
if len(lowerCamelCase) < 2:
return dirty
for i in range(len(lowerCamelCase) - 1):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(lowerCamelCase) & 1:
clean += "X"
return clean
def __magic_name__( lowerCamelCase):
# I and J are used interchangeably to allow
# us to use a 5x5 table (25 letters)
__lowerCAmelCase = '''ABCDEFGHIKLMNOPQRSTUVWXYZ'''
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
__lowerCAmelCase = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(lowerCamelCase)
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(lowerCamelCase)
return table
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = generate_table(lowerCamelCase)
__lowerCAmelCase = prepare_input(lowerCamelCase)
__lowerCAmelCase = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowerCamelCase, 2):
__lowerCAmelCase , __lowerCAmelCase = divmod(table.index(lowerCamelCase), 5)
__lowerCAmelCase , __lowerCAmelCase = divmod(table.index(lowerCamelCase), 5)
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = generate_table(lowerCamelCase)
__lowerCAmelCase = ''''''
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(lowerCamelCase, 2):
__lowerCAmelCase , __lowerCAmelCase = divmod(table.index(lowerCamelCase), 5)
__lowerCAmelCase , __lowerCAmelCase = divmod(table.index(lowerCamelCase), 5)
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
| 174 |
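A round-trip sketch for the Playfair helpers above. The defs as printed are obfuscated, so the names encode/decode here are the hypothetical upstream ones for the fourth and fifth functions; with this key, the classic plaintext prepares to "HIDETHEGOLDINTHETREXESTUMP" (an X is inserted to break the double E) and should come back exactly.

key = "playfair example"
ct = encode("Hide the gold in the tree stump", key)  # hypothetical name
pt = decode(ct, key)                                 # hypothetical name
assert pt == "HIDETHEGOLDINTHETREXESTUMP"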
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _snake_case (self ):
__lowerCAmelCase = AutoModelForSeq2SeqLM.from_pretrained('''google/mt5-small''' , return_dict=__lowercase ).to(__lowercase )
__lowerCAmelCase = AutoTokenizer.from_pretrained('''google/mt5-small''' )
__lowerCAmelCase = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
__lowerCAmelCase = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
__lowerCAmelCase = model(input_ids.to(__lowercase ) , labels=labels.to(__lowercase ) ).loss
__lowerCAmelCase = -(labels.shape[-1] * loss.item())
__lowerCAmelCase = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 174 | 1 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[Any] = 'char'
A_ : List[Any] = 'bpe'
A_ : Dict = 'wp'
snake_case_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : Tuple = ['image_processor', 'char_tokenizer']
A_ : Union[str, Any] = 'ViTImageProcessor'
A_ : Optional[int] = 'MgpstrTokenizer'
def __init__(self : List[Any] , a__ : List[Any]=None , a__ : Optional[int]=None , **a__ : List[Any] ):
"""simple docstring"""
__snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a__ , )
__snake_case = kwargs.pop('''feature_extractor''' )
__snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
__snake_case = tokenizer
__snake_case = AutoTokenizer.from_pretrained('''gpt2''' )
__snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(a__ , a__ )
def __call__(self : Optional[Any] , a__ : Union[str, Any]=None , a__ : Any=None , a__ : Optional[Any]=None , **a__ : int ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
__snake_case = self.image_processor(a__ , return_tensors=a__ , **a__ )
if text is not None:
__snake_case = self.char_tokenizer(a__ , return_tensors=a__ , **a__ )
if text is None:
return inputs
elif images is None:
return encodings
else:
__snake_case = encodings['''input_ids''']
return inputs
def a (self : Any , a__ : int ):
"""simple docstring"""
__snake_case , __snake_case , __snake_case = sequences
__snake_case = char_preds.size(0 )
__snake_case , __snake_case = self._decode_helper(a__ , '''char''' )
__snake_case , __snake_case = self._decode_helper(a__ , '''bpe''' )
__snake_case , __snake_case = self._decode_helper(a__ , '''wp''' )
__snake_case = []
__snake_case = []
for i in range(a__ ):
__snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]]
__snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]]
__snake_case = scores.index(max(a__ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__snake_case = {}
__snake_case = final_strs
__snake_case = final_scores
__snake_case = char_strs
__snake_case = bpe_strs
__snake_case = wp_strs
return out
def a (self : List[Any] , a__ : Optional[int] , a__ : str ):
"""simple docstring"""
if format == DecodeType.CHARACTER:
__snake_case = self.char_decode
__snake_case = 1
__snake_case = '''[s]'''
elif format == DecodeType.BPE:
__snake_case = self.bpe_decode
__snake_case = 2
__snake_case = '''#'''
elif format == DecodeType.WORDPIECE:
__snake_case = self.wp_decode
__snake_case = 102
__snake_case = '''[SEP]'''
else:
raise ValueError(f"""Format {format} is not supported.""" )
__snake_case , __snake_case = [], []
__snake_case = pred_logits.size(0 )
__snake_case = pred_logits.size(1 )
__snake_case , __snake_case = pred_logits.topk(1 , dim=-1 , largest=a__ , sorted=a__ )
__snake_case = preds_index.view(-1 , a__ )[:, 1:]
__snake_case = decoder(a__ )
__snake_case , __snake_case = torch.nn.functional.softmax(a__ , dim=2 ).max(dim=2 )
__snake_case = preds_max_prob[:, 1:]
for index in range(a__ ):
__snake_case = preds_str[index].find(a__ )
__snake_case = preds_str[index][:pred_eos]
__snake_case = preds_index[index].cpu().tolist()
__snake_case = pred_index.index(a__ ) if eos_token in pred_index else -1
__snake_case = preds_max_prob[index][: pred_eos_index + 1]
__snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(a__ )
conf_scores.append(a__ )
return dec_strs, conf_scores
def a (self : List[Any] , a__ : List[Any] ):
"""simple docstring"""
__snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(a__ )]
return decode_strs
def a (self : Any , a__ : Optional[int] ):
"""simple docstring"""
return self.bpe_tokenizer.batch_decode(a__ )
def a (self : Optional[Any] , a__ : str ):
"""simple docstring"""
__snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(a__ )]
return decode_strs
| 365 |
from pathlib import Path
import fire
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : str , snake_case_ : int ) -> str:
__snake_case = Path(snake_case_ )
__snake_case = Path(snake_case_ )
dest_dir.mkdir(exist_ok=snake_case_ )
for path in src_dir.iterdir():
__snake_case = [x.rstrip() for x in list(path.open().readlines() )][:n]
__snake_case = dest_dir.joinpath(path.name )
print(snake_case_ )
dest_path.open('''w''' ).write('''\n'''.join(snake_case_ ) )
if __name__ == "__main__":
fire.Fire(minify)
| 238 | 0 |
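An equivalent direct call to the fire entry point above (fire.Fire(minify) implies the upstream name for the obfuscated def, and the paths here are hypothetical): it copies the first n lines of every file in the source directory into the destination.

minify("tests/fixtures/sample_data", "/tmp/mini", 100)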