"""simple docstring"""
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( _lowerCamelCase):
A_ : Any = ['pixel_values']
def __init__( self , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = 1 / 2_55 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_MEAN , _SCREAMING_SNAKE_CASE = IMAGENET_DEFAULT_STD , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 2_24}
__lowerCAmelCase : Optional[Any] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
__lowerCAmelCase : Any = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size' )
__lowerCAmelCase : Any = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Union[str, Any] = resample
__lowerCAmelCase : int = do_center_crop
__lowerCAmelCase : Union[str, Any] = crop_size
__lowerCAmelCase : List[Any] = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : int = do_normalize
__lowerCAmelCase : Any = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Dict = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase : List[str] = int((2_56 / 2_24) * size['shortest_edge'] )
__lowerCAmelCase : Union[str, Any] = get_resize_output_image_size(_SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}" )
return resize(
_SCREAMING_SNAKE_CASE , size=(size_dict['height'], size_dict['width']) , resample=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Dict = get_size_dict(_SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}" )
return center_crop(_SCREAMING_SNAKE_CASE , size=(size['height'], size['width']) , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
return rescale(_SCREAMING_SNAKE_CASE , scale=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ):
return normalize(_SCREAMING_SNAKE_CASE , mean=_SCREAMING_SNAKE_CASE , std=_SCREAMING_SNAKE_CASE , data_format=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase : List[str] = resample if resample is not None else self.resample
__lowerCAmelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase : Optional[int] = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase : str = image_std if image_std is not None else self.image_std
__lowerCAmelCase : List[str] = size if size is not None else self.size
__lowerCAmelCase : Union[str, Any] = get_size_dict(_SCREAMING_SNAKE_CASE , default_to_square=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase : str = get_size_dict(_SCREAMING_SNAKE_CASE , param_name='crop_size' )
__lowerCAmelCase : Union[str, Any] = make_list_of_images(_SCREAMING_SNAKE_CASE )
if not valid_images(_SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase : Optional[int] = [to_numpy_array(_SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
__lowerCAmelCase : Dict = [self.resize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
__lowerCAmelCase : Optional[int] = [self.center_crop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
__lowerCAmelCase : Dict = [self.rescale(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
__lowerCAmelCase : Optional[int] = [self.normalize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase : Dict = [to_channel_dimension_format(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for image in images]
__lowerCAmelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
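# --- Editor's note -------------------------------------------------------
# The sample above is a Transformers-style image processor whose local names
# were obfuscated (every assignment targets `__lowerCAmelCase`), so it does
# not run as printed. Below is a minimal readable sketch of the same
# resize -> center-crop -> rescale -> normalize pipeline in plain NumPy.
# Only the constants (the 256/224 shortest-edge ratio, 224x224 crop, 1/255
# rescale, ImageNet mean/std) come from the sample; every name here is
# illustrative.
import numpy as np
from PIL import Image

IMAGENET_MEAN = np.array([0.485, 0.456, 0.406])
IMAGENET_STD = np.array([0.229, 0.224, 0.225])

def preprocess_sketch(image: np.ndarray) -> np.ndarray:
    """image: HWC uint8 array -> CHW float32 array."""
    # Resize so the shortest edge becomes int(256 / 224 * 224) = 256,
    # keeping the aspect ratio (the sample's "shortest_edge" branch).
    h, w = image.shape[:2]
    scale = int(256 / 224 * 224) / min(h, w)
    new_h, new_w = int(h * scale), int(w * scale)
    image = np.asarray(Image.fromarray(image).resize((new_w, new_h), Image.BICUBIC))
    # Center-crop to 224 x 224.
    top, left = (new_h - 224) // 2, (new_w - 224) // 2
    image = image[top : top + 224, left : left + 224]
    # Rescale to [0, 1], normalize with ImageNet statistics, channels first.
    image = image.astype(np.float32) / 255.0
    image = (image - IMAGENET_MEAN) / IMAGENET_STD
    return image.transpose(2, 0, 1).astype(np.float32)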
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[Any] = 7
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = 99
__lowerCAmelCase : int = 3_84
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : str = 37
__lowerCAmelCase : Any = 'gelu'
__lowerCAmelCase : List[str] = 0.1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : Union[str, Any] = 5_12
__lowerCAmelCase : int = 16
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : int = 0.02
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : Tuple = 1_28
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[str] = 9
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : Tuple = [input_ids, input_mask]
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_choices
__lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
    __lowerCAmelCase,
    __lowerCAmelCase,
    __lowerCAmelCase,
    __lowerCAmelCase,
    __lowerCAmelCase,
    __lowerCAmelCase,
    __lowerCAmelCase,
) = config_and_inputs  # annotation dropped: annotating a tuple unpacking target is a SyntaxError
__lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : List[Any] = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : List[str] = outputs['encoder_hidden_states']
__lowerCAmelCase : Tuple = outputs['encoder_attentions']
else:
__lowerCAmelCase : Optional[int] = outputs['hidden_states']
__lowerCAmelCase : Tuple = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
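# --- Editor's note -------------------------------------------------------
# A readable sketch of the integration check at the end of the sample above:
# load the public ConvBERT checkpoint, run a tiny batch, and verify the
# output shape. The checkpoint name and the (1, 6, 768) shape come from the
# sample; this needs network access and TensorFlow installed.
import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
last_hidden_state = model(input_ids)[0]
assert last_hidden_state.shape == (1, 6, 768)  # (batch, sequence, hidden size)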
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class UpperCamelCase_ :
"""simple docstring"""
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple ) -> str:
return None
class UpperCamelCase_ :
"""simple docstring"""
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
return None
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
snake_case__ : Any = [
# (model_name, model_kwargs)
("bert-base-cased", {}),
("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCAmelCase__ , "tf" , 1_2 , **UpperCAmelCase__ )
@require_torch
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(UpperCAmelCase__ , "pt" , 1_2 , **UpperCAmelCase__ )
@require_torch
@slow
def UpperCAmelCase_ ( self : List[str] ) -> int:
from transformers import BertModel
__SCREAMING_SNAKE_CASE = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t" ) as vocab_file:
vocab_file.write("\n".join(UpperCAmelCase__ ) )
vocab_file.flush()
__SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(UpperCAmelCase__ ) ) )
model.save_pretrained(UpperCAmelCase__ )
self._test_export(UpperCAmelCase__ , "pt" , 1_2 , UpperCAmelCase__ )
@require_tf
@slow
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__SCREAMING_SNAKE_CASE = self._test_export(UpperCAmelCase__ , "tf" , 1_2 , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = quantize(Path(UpperCAmelCase__ ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCAmelCase__ ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
@require_torch
@slow
def UpperCAmelCase_ ( self : int ) -> Tuple:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__SCREAMING_SNAKE_CASE = self._test_export(UpperCAmelCase__ , "pt" , 1_2 , **UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = quantize(UpperCAmelCase__ )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(UpperCAmelCase__ ).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model" )
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str=None , **UpperCAmelCase__ : str ) -> Tuple:
try:
# Compute path
with TemporaryDirectory() as tempdir:
__SCREAMING_SNAKE_CASE = Path(UpperCAmelCase__ ).joinpath("model.onnx" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
return path
except Exception as e:
self.fail(UpperCAmelCase__ )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase_ ( self : int ) -> str:
from transformers import BertModel
__SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
__SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(UpperCAmelCase__ , UpperCAmelCase__ , "pt" )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase_ ( self : int ) -> Optional[Any]:
from transformers import TFBertModel
__SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) )
__SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" )
self._test_infer_dynamic_axis(UpperCAmelCase__ , UpperCAmelCase__ , "tf" )
def UpperCAmelCase_ ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(UpperCAmelCase__ , UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = infer_shapes(UpperCAmelCase__ , UpperCAmelCase__ )
# Assert all variables are present
self.assertEqual(len(UpperCAmelCase__ ) , len(UpperCAmelCase__ ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , UpperCAmelCase__ )
self.assertSequenceEqual(variable_names[3:] , UpperCAmelCase__ )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} )
self.assertDictEqual(shapes["output_1"] , {0: "batch"} )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE = ["input_ids", "attention_mask", "token_type_ids"]
__SCREAMING_SNAKE_CASE = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , UpperCAmelCase__ , UpperCAmelCase__ )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(UpperCAmelCase__ ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(UpperCAmelCase__ ) , set(UpperCAmelCase__ ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(UpperCAmelCase__ , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , UpperCAmelCase__ , UpperCAmelCase__ )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(UpperCAmelCase__ ) , 1 )
self.assertEqual(len(UpperCAmelCase__ ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["input_ids"] )
self.assertEqual(ordered_input_names[0] , "input_ids" )
def UpperCAmelCase_ ( self : Tuple ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" )
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
if digit_amount > 0:
return round(number - int(lowerCAmelCase_ ) , lowerCAmelCase_ )
return number - int(lowerCAmelCase_ )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
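# --- Editor's note -------------------------------------------------------
# The function above isolates the fractional part of a number, rounded to
# `digit_amount` places when that is positive. A readable equivalent (the
# name is illustrative), with the usual float-representation caveats:
def decimal_isolate_readable(number: float, digit_amount: int) -> float:
    fraction = number - int(number)  # int() truncates toward zero, so the sign is kept
    return round(fraction, digit_amount) if digit_amount > 0 else fraction

assert decimal_isolate_readable(35.345, 1) == 0.3
assert decimal_isolate_readable(-14.789, 3) == -0.789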
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase = None):
'''simple docstring'''
__A : Optional[Any] = value
__A : Node | None = None # Added in order to delete a node easier
__A : Node | None = None
__A : Node | None = None
def __repr__( self):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value)
return pformat({F'{self.value}': (self.left, self.right)} , indent=1)
class SCREAMING_SNAKE_CASE :
def __init__( self , _UpperCAmelCase = None):
'''simple docstring'''
__A : List[str] = root
def __str__( self):
'''simple docstring'''
return str(self.root)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if new_children is not None: # reset its kids
__A : Optional[int] = node.parent
if node.parent is not None: # reset its parent
if self.is_right(_UpperCAmelCase): # If it is the right children
__A : str = new_children
else:
__A : List[Any] = new_children
else:
__A : int = new_children
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return self.root is None
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = Node(_UpperCAmelCase) # create a new Node
if self.empty(): # if Tree is empty
__A : List[Any] = new_node # set its root
else: # Tree is not empty
__A : Any = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
__A : Optional[Any] = new_node # We insert the new node in a leaf
break
else:
__A : List[Any] = parent_node.left
else:
if parent_node.right is None:
__A : Any = new_node
break
else:
__A : Optional[Any] = parent_node.right
__A : str = parent_node
def SCREAMING_SNAKE_CASE ( self , *_UpperCAmelCase):
'''simple docstring'''
for value in values:
self.__insert(_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if self.empty():
raise IndexError('Warning: Tree is empty! Please insert values before searching.')
else:
__A : Dict = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
__A : Tuple = node.left if value < node.value else node.right
return node
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase = None):
'''simple docstring'''
if node is None:
if self.root is None:
return None
__A : Dict = self.root
if not self.empty():
while node.right is not None:
__A : Optional[int] = node.right
return node
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase = None):
'''simple docstring'''
if node is None:
__A : Optional[Any] = self.root
if self.root is None:
return None
if not self.empty():
__A : List[str] = self.root
while node.left is not None:
__A : int = node.left
return node
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Any = self.search(_UpperCAmelCase) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_UpperCAmelCase , _UpperCAmelCase)
elif node.left is None: # Has only right children
self.__reassign_nodes(_UpperCAmelCase , node.right)
elif node.right is None: # Has only left children
self.__reassign_nodes(_UpperCAmelCase , node.left)
else:
__A : Optional[int] = self.get_max(
node.left) # Gets the max value of the left branch
self.remove(tmp_node.value) # type: ignore
__A : Any = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left)
yield from self.preorder_traverse(node.right)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=None):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root)
else:
return traversal_function(self.root)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
if node:
self.inorder(_UpperCAmelCase , node.left)
arr.append(node.value)
self.inorder(_UpperCAmelCase , node.right)
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : list[int] = []
self.inorder(_UpperCAmelCase , _UpperCAmelCase) # append all values to list using inorder traversal
return arr[k - 1]
def _lowerCAmelCase ( __snake_case : Node | None ) -> list[Node]:
__A : str = []
if curr_node is not None:
__A : Union[str, Any] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def _lowerCAmelCase ( ) -> None:
__A : str = (8, 3, 6, 1, 10, 14, 13, 4, 7)
__A : Tuple = BinarySearchTree()
for i in testlist:
t.insert(__snake_case )
# Prints all the elements of the list in order traversal
print(__snake_case )
if t.search(6 ) is not None:
print('The value 6 exists' )
else:
print('The value 6 doesn\'t exist' )
if t.search(-1 ) is not None:
print('The value -1 exists' )
else:
print('The value -1 doesn\'t exist' )
if not t.empty():
print('Max Value: ' , t.get_max().value ) # type: ignore
print('Min Value: ' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(__snake_case )
print(__snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
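# --- Editor's note -------------------------------------------------------
# Every method in the tree class above was obfuscated to SCREAMING_SNAKE_CASE
# and the demo at the bottom calls names (`BinarySearchTree`, `insert`, ...)
# that no longer exist under those spellings, so the sample does not run as
# printed. Below is a compact self-contained sketch of the same idea: an
# unbalanced binary search tree with insert and search. It is illustrative,
# not a restoration of the original names.
from __future__ import annotations

class BSTNode:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: BSTNode | None = None
        self.right: BSTNode | None = None

class SimpleBST:
    def __init__(self) -> None:
        self.root: BSTNode | None = None

    def insert(self, value: int) -> None:
        if self.root is None:
            self.root = BSTNode(value)
            return
        node = self.root
        while True:  # walk down to a leaf, going left for smaller values
            side = "left" if value < node.value else "right"
            child = getattr(node, side)
            if child is None:
                setattr(node, side, BSTNode(value))
                return
            node = child

    def search(self, value: int) -> BSTNode | None:
        node = self.root
        while node is not None and node.value != value:
            node = node.left if value < node.value else node.right
        return node

tree = SimpleBST()
for v in (8, 3, 6, 1, 10, 14, 13, 4, 7):  # the demo's test list
    tree.insert(v)
assert tree.search(6) is not None and tree.search(-1) is None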
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = tempfile.mkdtemp()
__A : Optional[int] = BlipImageProcessor()
__A : List[str] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-BertModel')
__A : Any = BlipProcessor(_UpperCAmelCase , _UpperCAmelCase)
processor.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase).tokenizer
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase).image_processor
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__A : Union[str, Any] = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1)) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__A : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
__A : List[Any] = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0)
__A : Tuple = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_UpperCAmelCase , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.get_image_processor()
__A : str = self.get_tokenizer()
__A : int = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : List[str] = self.prepare_image_inputs()
__A : str = image_processor(_UpperCAmelCase , return_tensors='np')
__A : str = processor(images=_UpperCAmelCase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_image_processor()
__A : Tuple = self.get_tokenizer()
__A : Union[str, Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : str = 'lower newer'
__A : Dict = processor(text=_UpperCAmelCase)
__A : Tuple = tokenizer(_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Optional[Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Dict = 'lower newer'
__A : int = self.prepare_image_inputs()
__A : List[Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase):
processor()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : Union[str, Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : List[str] = processor.batch_decode(_UpperCAmelCase)
__A : List[str] = tokenizer.batch_decode(_UpperCAmelCase)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Optional[Any] = BlipProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase)
__A : str = 'lower newer'
__A : str = self.prepare_image_inputs()
__A : Optional[int] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase)
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys()) , ['pixel_values', 'input_ids', 'attention_mask'])
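# --- Editor's note -------------------------------------------------------
# A readable sketch of the round trip these tests exercise: a BlipProcessor
# wraps a tokenizer and an image processor, and calling it with text plus
# images yields 'pixel_values', 'input_ids' and 'attention_mask'. The tiny
# test checkpoint name and the random-image setup come from the sample;
# downloading the tokenizer needs network access.
import numpy as np
from PIL import Image
from transformers import BertTokenizer, BlipImageProcessor, BlipProcessor

processor = BlipProcessor(
    tokenizer=BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
    image_processor=BlipImageProcessor(),
)
image = Image.fromarray(np.random.randint(255, size=(30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image)
assert sorted(inputs.keys()) == ["attention_mask", "input_ids", "pixel_values"]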
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Any=7 , _lowercase :List[str]=3 , _lowercase :List[Any]=18 , _lowercase :List[Any]=30 , _lowercase :Union[str, Any]=4_00 , _lowercase :str=True , _lowercase :Optional[int]=None , _lowercase :Optional[int]=True , _lowercase :Any=None , ):
'''simple docstring'''
lowercase__ = size if size is not None else {"shortest_edge": 20}
lowercase__ = crop_size if crop_size is not None else {"height": 18, "width": 18}
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size
lowercase__ = do_center_crop
lowercase__ = crop_size
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowerCAmelCase ( lowercase_ , unittest.TestCase ):
__lowerCamelCase = MobileNetVaImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self :str ):
'''simple docstring'''
lowercase__ = MobileNetVaImageProcessingTester(self )
@property
def UpperCAmelCase ( self :str ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self :Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowercase , "do_resize" ) )
self.assertTrue(hasattr(_lowercase , "size" ) )
self.assertTrue(hasattr(_lowercase , "do_center_crop" ) )
self.assertTrue(hasattr(_lowercase , "crop_size" ) )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 20} )
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
pass
def UpperCAmelCase ( self :int ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ = image_processing(_lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ = image_processing(_lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
lowercase__ = image_processing(_lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
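# --- Editor's note -------------------------------------------------------
# The three tests above repeat one pattern for PIL, NumPy and PyTorch inputs:
# preprocess a batch, then check that pixel values come out with shape
# (batch, channels, crop_height, crop_width). A readable one-off version,
# assuming `MobileNetV1ImageProcessor` is the public class behind the
# sample's obfuscated `MobileNetVaImageProcessor` spelling; needs torch for
# return_tensors="pt".
import numpy as np
from transformers import MobileNetV1ImageProcessor

image_processor = MobileNetV1ImageProcessor(
    size={"shortest_edge": 20}, crop_size={"height": 18, "width": 18}
)
images = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8) for _ in range(7)]
pixel_values = image_processor(images, return_tensors="pt").pixel_values
assert pixel_values.shape == (7, 3, 18, 18)  # (batch, channels, crop h, crop w)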
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
_snake_case = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase = field(default=lowercase_ , metadata={'help': 'Whether to freeze the encoder.'} )
__lowerCamelCase = field(default=lowercase_ , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class lowerCAmelCase :
__lowerCamelCase = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__lowerCamelCase = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
__lowerCamelCase = field(
default=1_024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
__lowerCamelCase = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
__lowerCamelCase = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
__lowerCamelCase = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
__lowerCamelCase = field(default=lowercase_ , metadata={'help': 'Source language id for translation.'} )
__lowerCamelCase = field(default=lowercase_ , metadata={'help': 'Target language id for translation.'} )
__lowerCamelCase = field(default=lowercase_ , metadata={'help': '# num_beams to use for evaluation.'} )
__lowerCamelCase = field(
default=lowercase_ , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
logger.info(f'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(f''' {key} = {metrics[key]}''' )
save_json(__magic_name__ , os.path.join(__magic_name__ , f'''{split}_results.json''' ) )
def _A ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
check_output_dir(__magic_name__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , __magic_name__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase__ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(__magic_name__ , __magic_name__ , __magic_name__ ):
assert hasattr(__magic_name__ , __magic_name__ ), f'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(__magic_name__ , __magic_name__ , getattr(__magic_name__ , __magic_name__ ) )
lowercase__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowercase__ = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=__magic_name__ , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(__magic_name__ , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
lowercase__ = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__magic_name__ , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
lowercase__ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__magic_name__ )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
lowercase__ = SeqaSeqDataset
# Get datasets
lowercase__ = (
dataset_class(
__magic_name__ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_train
else None
)
lowercase__ = (
dataset_class(
__magic_name__ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
lowercase__ = (
dataset_class(
__magic_name__ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
lowercase__ = (
build_compute_metrics_fn(data_args.task , __magic_name__ ) if training_args.predict_with_generate else None
)
lowercase__ = SeqaSeqTrainer(
model=__magic_name__ , args=__magic_name__ , data_args=__magic_name__ , train_dataset=__magic_name__ , eval_dataset=__magic_name__ , data_collator=SeqaSeqDataCollator(
__magic_name__ , __magic_name__ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=__magic_name__ , tokenizer=__magic_name__ , )
lowercase__ = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
lowercase__ = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
lowercase__ = train_result.metrics
lowercase__ = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("train" , __magic_name__ , training_args.output_dir )
all_metrics.update(__magic_name__ )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowercase__ = trainer.evaluate(metric_key_prefix="val" )
lowercase__ = data_args.n_val
lowercase__ = round(metrics["val_loss"] , 4 )
if trainer.is_world_process_zero():
handle_metrics("val" , __magic_name__ , training_args.output_dir )
all_metrics.update(__magic_name__ )
if training_args.do_predict:
logger.info("*** Predict ***" )
lowercase__ = trainer.predict(test_dataset=__magic_name__ , metric_key_prefix="test" )
lowercase__ = test_output.metrics
lowercase__ = data_args.n_test
if trainer.is_world_process_zero():
lowercase__ = round(metrics["test_loss"] , 4 )
handle_metrics("test" , __magic_name__ , training_args.output_dir )
all_metrics.update(__magic_name__ )
if training_args.predict_with_generate:
lowercase__ = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=__magic_name__ , clean_up_tokenization_spaces=__magic_name__ )
lowercase__ = lmap(str.strip , __magic_name__ )
write_txt_file(__magic_name__ , os.path.join(training_args.output_dir , "test_generations.txt" ) )
if trainer.is_world_process_zero():
save_json(__magic_name__ , os.path.join(training_args.output_dir , "all_results.json" ) )
return all_metrics
def _A ( __magic_name__ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 201 | 1 |
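# The train/eval/predict branches above all report through `handle_metrics`,
# which is defined earlier in this (truncated) script. A minimal sketch of the
# behavior it is assumed to have -- log each metric and persist the dict as
# `<split>_results.json` -- not the script's exact implementation:
import json
import os


def handle_metrics_sketch(split, metrics, output_dir):
    """Log `metrics` for `split` and save them to `<split>_results.json`."""
    print(f"***** {split} metrics *****")
    for key in sorted(metrics):
        print(f"  {key} = {metrics[key]}")
    with open(os.path.join(output_dir, f"{split}_results.json"), "w") as f:
        json.dump(metrics, f, indent=4)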
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self : str ):
__lowercase = 1
__lowercase = 3
__lowercase = (3_2, 3_2)
__lowercase = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(UpperCAmelCase__ )
return image
@property
def _lowercase ( self : Optional[Any] ):
torch.manual_seed(0 )
__lowercase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=3_2, )
return model
@property
def _lowercase ( self : Optional[Any] ):
torch.manual_seed(0 )
__lowercase = AutoencoderKL(
block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
return model
@property
def _lowercase ( self : Dict ):
torch.manual_seed(0 )
__lowercase = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, )
return CLIPTextModel(UpperCAmelCase__ )
@property
def _lowercase ( self : str ):
def extract(*UpperCAmelCase__ : Tuple, **UpperCAmelCase__ : str ):
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : int ):
__lowercase = torch.ones([0] )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : Dict ):
self.pixel_values.to(UpperCAmelCase__ )
return self
return Out()
return extract
def _lowercase ( self : Optional[Any] ):
__lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.dummy_cond_unet
__lowercase = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=UpperCAmelCase__, set_alpha_to_one=UpperCAmelCase__, )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
__lowercase = StableDiffusionPipeline(
unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=self.dummy_extractor, )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A painting of a squirrel eating a burger"
__lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
__lowercase = sd_pipe([prompt], generator=UpperCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type="np" )
__lowercase = output.images
__lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=UpperCAmelCase__, )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Union[str, Any] ):
__lowercase = "cpu" # ensure determinism for the device-dependent torch.Generator
__lowercase = self.dummy_cond_unet
__lowercase = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
__lowercase = StableDiffusionPipeline(
unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=self.dummy_extractor, )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A painting of a squirrel eating a burger"
__lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
__lowercase = sd_pipe([prompt], generator=UpperCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type="np" )
__lowercase = output.images
__lowercase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=UpperCAmelCase__, )[0]
__lowercase = image[0, -3:, -3:, -1]
__lowercase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowercase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : int ):
__lowercase = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__, UpperCAmelCase__ )
assert isinstance(pipe.scheduler, UpperCAmelCase__ )
assert pipe.safety_checker is None
__lowercase = pipe("example prompt", num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(UpperCAmelCase__ )
__lowercase = StableDiffusionPipeline.from_pretrained(UpperCAmelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase = pipe("example prompt", num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda", "This test requires a GPU" )
def _lowercase ( self : str ):
__lowercase = self.dummy_cond_unet
__lowercase = PNDMScheduler(skip_prk_steps=UpperCAmelCase__ )
__lowercase = self.dummy_vae
__lowercase = self.dummy_text_encoder
__lowercase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
__lowercase = unet.half()
__lowercase = vae.half()
__lowercase = bert.half()
# make sure here that pndm scheduler skips prk
__lowercase = StableDiffusionPipeline(
unet=UpperCAmelCase__, scheduler=UpperCAmelCase__, vae=UpperCAmelCase__, text_encoder=UpperCAmelCase__, tokenizer=UpperCAmelCase__, safety_checker=UpperCAmelCase__, feature_extractor=self.dummy_extractor, )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "A painting of a squirrel eating a burger"
__lowercase = sd_pipe([prompt], num_inference_steps=2, output_type="np" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Dict ):
__lowercase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=UpperCAmelCase__ )
__lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
__lowercase = 4_0_0_3_6_6_0_3_4_6
__lowercase = 7
# without safety guidance (sld_guidance_scale = 0)
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=0, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with strong safety guidance (sld_guidance_scale = 2000)
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : str ):
__lowercase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=UpperCAmelCase__ )
__lowercase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = "padme amidala taking a bath artwork, safe for work, no nudity"
__lowercase = 2_7_3_4_9_7_1_7_5_5
__lowercase = 7
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=0, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowercase ( self : Tuple ):
__lowercase = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
__lowercase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
__lowercase = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
__lowercase = 1_0_4_4_3_5_5_2_3_4
__lowercase = 1_2
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=0, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__lowercase = torch.manual_seed(UpperCAmelCase__ )
__lowercase = sd_pipe(
[prompt], generator=UpperCAmelCase__, guidance_scale=UpperCAmelCase__, num_inference_steps=5_0, output_type="np", width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
__lowercase = output.images
__lowercase = image[0, -3:, -3:, -1]
__lowercase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 17 |
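# Every assertion block in the tests above follows the same slice-comparison
# pattern. A self-contained sketch of that pattern; the arrays here are
# placeholders, not real pipeline outputs:
import numpy as np

image = np.zeros((1, 64, 64, 3), dtype=np.float32)  # stand-in for `output.images`
image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
expected_slice = np.zeros(9, dtype=np.float32)  # hard-coded reference values go here
assert image.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2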
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
    print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 139 | 0 |
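# A few usage checks for the isogram predicate above:
assert is_isogram("Dermatoglyphics") is True  # fifteen distinct letters
assert is_isogram("moose") is False  # 'o' repeats
assert is_isogram("aA") is False  # the comparison is case-insensitive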
def solution(limit: int = 28123) -> int:
    """Sum all positive integers that cannot be written as the sum of two
    abundant numbers; every integer above 28123 can be, so only values up
    to `limit` need checking."""
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a) in abundants for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
| 208 |
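# A worked check of the abundance definition the sieve above relies on:
# 12 is the smallest abundant number, since 1 + 2 + 3 + 4 + 6 = 16 > 12.
def sum_proper_divisors(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)


assert sum_proper_divisors(12) == 16  # 12 is abundant
assert all(sum_proper_divisors(m) <= m for m in range(1, 12))  # nothing smaller is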
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = "Hello world! cécé herlolip"
def _SCREAMING_SNAKE_CASE ( lowercase : str , lowercase : str , lowercase : bool ):
'''simple docstring'''
lowerCamelCase_ = FairseqRobertaModel.from_pretrained(lowercase )
roberta.eval() # disable dropout
lowerCamelCase_ = roberta.model.encoder.sentence_encoder
lowerCamelCase_ = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=5_14 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , lowercase )
lowerCamelCase_ = XLMRobertaXLForSequenceClassification(lowercase ) if classification_head else XLMRobertaXLForMaskedLM(lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ = roberta_sent_encoder.embed_tokens.weight
lowerCamelCase_ = roberta_sent_encoder.embed_positions.weight
lowerCamelCase_ = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowerCamelCase_ = roberta_sent_encoder.layer_norm.weight
lowerCamelCase_ = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ = model.roberta.encoder.layer[i]
lowerCamelCase_ = roberta_sent_encoder.layers[i]
lowerCamelCase_ = layer.attention
lowerCamelCase_ = roberta_layer.self_attn_layer_norm.weight
lowerCamelCase_ = roberta_layer.self_attn_layer_norm.bias
# self attention
lowerCamelCase_ = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowerCamelCase_ = roberta_layer.self_attn.q_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.q_proj.bias
lowerCamelCase_ = roberta_layer.self_attn.k_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.k_proj.bias
lowerCamelCase_ = roberta_layer.self_attn.v_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowerCamelCase_ = roberta_layer.self_attn.out_proj.weight
lowerCamelCase_ = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowerCamelCase_ = roberta_layer.final_layer_norm.weight
lowerCamelCase_ = roberta_layer.final_layer_norm.bias
# intermediate
lowerCamelCase_ = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
lowerCamelCase_ = roberta_layer.fc1.weight
lowerCamelCase_ = roberta_layer.fc1.bias
# output
lowerCamelCase_ = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
lowerCamelCase_ = roberta_layer.fc2.weight
lowerCamelCase_ = roberta_layer.fc2.bias
# end of layer
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads['mnli'].dense.weight
lowerCamelCase_ = roberta.model.classification_heads['mnli'].dense.bias
lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.weight
lowerCamelCase_ = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
lowerCamelCase_ = roberta.model.encoder.lm_head.dense.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.dense.bias
lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ = roberta.model.encoder.lm_head.weight
lowerCamelCase_ = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ = roberta.encode(lowercase ).unsqueeze(0 ) # batch of size 1
lowerCamelCase_ = model(lowercase )[0]
if classification_head:
lowerCamelCase_ = roberta.model.classification_heads['mnli'](roberta.extract_features(lowercase ) )
else:
lowerCamelCase_ = roberta.model(lowercase )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ = torch.max(torch.abs(our_output - their_output ) ).item()
print(f"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ = torch.allclose(lowercase , lowercase , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(lowercase ).mkdir(parents=lowercase , exist_ok=lowercase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowerCamelCase : List[Any] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 208 | 1 |
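# The conversion above is one long sequence of parameter copies guarded by
# shape asserts. The core pattern in miniature, with toy modules rather than
# fairseq's real layout:
import torch

src = torch.nn.Linear(4, 4)
dst = torch.nn.Linear(4, 4)
assert dst.weight.shape == src.weight.shape  # guard against layout mismatches
dst.weight = src.weight  # nn.Parameter assignment; dst now shares src's storage
dst.bias = src.bias
x = torch.ones(1, 4)
assert torch.equal(dst(x), src(x))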
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCamelCase ( lowerCAmelCase_ ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__a , 'embed_dim' ) )
self.parent.assertTrue(hasattr(__a , 'num_heads' ) )
class __UpperCamelCase :
def __init__( self , __a , __a=13 , __a=64 , __a=3 , __a=[16, 48, 96] , __a=[1, 3, 6] , __a=[1, 2, 10] , __a=[7, 3, 3] , __a=[4, 2, 2] , __a=[2, 1, 1] , __a=[2, 2, 2] , __a=[False, False, True] , __a=[0.0, 0.0, 0.0] , __a=0.02 , __a=1E-1_2 , __a=True , __a=True , __a=2 , ):
'''simple docstring'''
__a : str = parent
__a : List[Any] = batch_size
__a : Optional[int] = image_size
__a : List[str] = patch_sizes
__a : str = patch_stride
__a : Any = patch_padding
__a : Dict = is_training
__a : Union[str, Any] = use_labels
__a : Dict = num_labels
__a : List[Any] = num_channels
__a : Any = embed_dim
__a : int = num_heads
__a : Optional[int] = stride_kv
__a : Dict = depth
__a : List[str] = cls_token
__a : List[Any] = attention_drop_rate
__a : Tuple = initializer_range
__a : int = layer_norm_eps
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
# create a random int32 tensor of given shape
__a : str = ids_tensor([self.batch_size] , self.num_labels )
__a : str = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = TFCvtModel(config=__a )
__a : Dict = model(__a , training=__a )
__a : Any = (self.image_size, self.image_size)
__a , __a : Dict = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
__a : Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
__a : str = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = self.num_labels
__a : Optional[int] = TFCvtForImageClassification(__a )
__a : Dict = model(__a , labels=__a , training=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[Any] = self.prepare_config_and_inputs()
__a , __a , __a : Tuple = config_and_inputs
__a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
A_ = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
A_ = False
A_ = False
A_ = False
A_ = False
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtModelTester(self )
__a : List[Any] = TFCvtConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='Cvt does not output attentions' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason='Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = tf.keras.mixed_precision.Policy('mixed_float16' )
tf.keras.mixed_precision.set_global_policy(__a )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('float32' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : Dict = model_class(__a )
__a : Optional[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Optional[Any] = [*signature.parameters.keys()]
__a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : List[str] = model_class(__a )
__a : Union[str, Any] = model(**self._prepare_for_class(__a , __a ) )
__a : Any = outputs.hidden_states
__a : Union[str, Any] = len(self.model_tester.depth )
self.assertEqual(len(__a ) , __a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Optional[Any] = True
check_hidden_states_output(__a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Optional[Any] = TFCvtModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCamelCase ():
__a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__a : Tuple = self.default_image_processor
__a : Any = prepare_img()
__a : int = image_processor(images=__a , return_tensors='tf' )
# forward pass
__a : Any = model(**__a )
# verify the logits
__a : Any = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__a : Optional[Any] = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __a , atol=1E-4 ) )
| 27 |
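# `create_and_check_model` above derives each stage's spatial size with the
# standard convolution formula: out = floor((size + 2*padding - kernel) / stride) + 1.
# Worked for the first Cvt stage in this tester (size 64, kernel 7, stride 4, padding 2):
from math import floor

size, kernel, stride, padding = 64, 7, 4, 2
out = floor((size + 2 * padding - kernel) / stride) + 1  # floor(61 / 4) + 1
assert out == 16 == size // 4  # matches the `image_size // 4` hidden-state check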
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase : str = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__lowercase : Tuple = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__lowercase : Dict = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__lowercase : Optional[Any] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 1 |
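# The `_LazyModule` pattern above defers the heavy torch/TF imports until an
# attribute is first accessed. The mechanism in miniature via module-level
# `__getattr__` (PEP 562); an illustration of the idea, not transformers'
# actual implementation:
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dataclass": "dataclasses"}  # attribute -> providing module


def __getattr__(name):
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")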
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCamelCase ( __magic_name__ : int ):
random.seed(__magic_name__ )
np.random.seed(__magic_name__ )
torch.manual_seed(__magic_name__ )
torch.cuda.manual_seed_all(__magic_name__ )
# ^^ safe to call this function even if cuda is not available
class lowerCamelCase__ :
def __init__( self : Optional[Any] , _a : Iterable[torch.nn.Parameter] , _a : float = 0.9_9_9_9 , _a : float = 0.0 , _a : int = 0 , _a : bool = False , _a : Union[float, int] = 1.0 , _a : Union[float, int] = 2 / 3 , _a : Optional[Any] = None , _a : Dict[str, Any] = None , **_a : Tuple , ):
if isinstance(_a , torch.nn.Module ):
a__: int =(
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , _a , standard_warn=_a , )
a__: Tuple =parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
a__: Tuple =True
if kwargs.get("max_value" , _a ) is not None:
a__: Union[str, Any] ="The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , _a , standard_warn=_a )
a__: Optional[Any] =kwargs["max_value"]
if kwargs.get("min_value" , _a ) is not None:
a__: List[Any] ="The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , _a , standard_warn=_a )
a__: Any =kwargs["min_value"]
a__: int =list(_a )
a__: List[Any] =[p.clone().detach() for p in parameters]
if kwargs.get("device" , _a ) is not None:
a__: List[str] ="The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , _a , standard_warn=_a )
self.to(device=kwargs["device"] )
a__: Any =None
a__: Union[str, Any] =decay
a__: List[str] =min_decay
a__: Optional[Any] =update_after_step
a__: str =use_ema_warmup
a__: List[str] =inv_gamma
a__: Any =power
a__: List[str] =0
a__: Optional[int] =None # set in `step()`
a__: Union[str, Any] =model_cls
a__: List[str] =model_config
@classmethod
def _lowerCamelCase ( cls : str , _a : Optional[Any] , _a : List[Any] ):
a__ , a__: int =model_cls.load_config(_a , return_unused_kwargs=_a )
a__: Union[str, Any] =model_cls.from_pretrained(_a )
a__: Optional[int] =cls(model.parameters() , model_cls=_a , model_config=model.config )
ema_model.load_state_dict(_a )
return ema_model
def _lowerCamelCase ( self : List[Any] , _a : str ):
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
a__: List[str] =self.model_cls.from_config(self.model_config )
a__: List[Any] =self.state_dict()
state_dict.pop("shadow_params" , _a )
model.register_to_config(**_a )
self.copy_to(model.parameters() )
model.save_pretrained(_a )
def _lowerCamelCase ( self : str , _a : int ):
a__: List[str] =max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
a__: Any =1 - (1 + step / self.inv_gamma) ** -self.power
else:
a__: Any =(1 + step) / (1_0 + step)
a__: Union[str, Any] =min(_a , self.decay )
# make sure decay is not smaller than min_decay
a__: Dict =max(_a , self.min_decay )
return cur_decay_value
@torch.no_grad()
def _lowerCamelCase ( self : List[str] , _a : Iterable[torch.nn.Parameter] ):
if isinstance(_a , torch.nn.Module ):
a__: Optional[int] =(
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , _a , standard_warn=_a , )
a__: Dict =parameters.parameters()
a__: Tuple =list(_a )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
a__: Dict =self.get_decay(self.optimization_step )
a__: List[str] =decay
a__: Optional[Any] =1 - decay
a__: int =contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , _a ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
a__: int =deepspeed.zero.GatheredParameters(_a , modifier_rank=_a )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(_a )
def _lowerCamelCase ( self : List[str] , _a : Iterable[torch.nn.Parameter] ):
a__: Union[str, Any] =list(_a )
for s_param, param in zip(self.shadow_params , _a ):
param.data.copy_(s_param.to(param.device ).data )
def _lowerCamelCase ( self : Tuple , _a : List[Any]=None , _a : List[str]=None ):
a__: int =[
p.to(device=_a , dtype=_a ) if p.is_floating_point() else p.to(device=_a )
for p in self.shadow_params
]
def _lowerCamelCase ( self : List[Any] ):
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def _lowerCamelCase ( self : Optional[int] , _a : Iterable[torch.nn.Parameter] ):
a__: int =[param.detach().cpu().clone() for param in parameters]
def _lowerCamelCase ( self : List[str] , _a : Iterable[torch.nn.Parameter] ):
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , _a ):
param.data.copy_(c_param.data )
# Better memory-wise.
a__: List[str] =None
def _lowerCamelCase ( self : int , _a : dict ):
a__: Optional[Any] =copy.deepcopy(_a )
a__: Optional[int] =state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
a__: str =state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , _a ):
raise ValueError("Invalid min_decay" )
a__: Optional[Any] =state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , _a ):
raise ValueError("Invalid optimization_step" )
a__: int =state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , _a ):
raise ValueError("Invalid update_after_step" )
a__: Tuple =state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , _a ):
raise ValueError("Invalid use_ema_warmup" )
a__: Optional[Any] =state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
a__: Union[str, Any] =state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
a__: Optional[int] =state_dict.get("shadow_params" , _a )
if shadow_params is not None:
a__: List[str] =shadow_params
if not isinstance(self.shadow_params , _a ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(_a , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 42 |
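# `get_decay` above ramps the decay as 1 - (1 + step / inv_gamma) ** -power when
# warmup is enabled. Worked with the class defaults (inv_gamma=1.0, power=2/3):
# at step 999 the raw value is 1 - 1000 ** (-2/3) = 1 - 0.01 = 0.99.
inv_gamma, power = 1.0, 2 / 3
step = 999
cur_decay = 1 - (1 + step / inv_gamma) ** -power
assert abs(cur_decay - 0.99) < 1e-9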
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 42 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 90 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
__UpperCAmelCase = logging.get_logger(__name__)
class lowerCamelCase (_snake_case ):
'''simple docstring'''
def __init__( self , *_UpperCamelCase , **_UpperCamelCase ) -> None:
warnings.warn(
'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ImageGPTImageProcessor instead.' , _UpperCamelCase , )
super().__init__(*_UpperCamelCase , **_UpperCamelCase )
| 29 | 0 |
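# The shim above keeps the old class importable while warning users toward the
# replacement. The same pattern in a self-contained form, with toy class names:
import warnings


class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale


class OldFeatureExtractor(NewProcessor):
    """Deprecated alias kept only for backwards compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn("OldFeatureExtractor is deprecated; use NewProcessor instead.", FutureWarning)
        super().__init__(*args, **kwargs)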
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of
    the squares of the first `n` natural numbers."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 361 |
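# Closed-form cross-check for the loop above: sum(1..n) = n(n+1)/2 and the
# sum of squares = n(n+1)(2n+1)/6, so for n = 10 the answer is 55**2 - 385 = 2640.
n = 10
sum_of_ints = n * (n + 1) // 2  # 55
sum_of_squares = n * (n + 1) * (2 * n + 1) // 6  # 385
assert sum_of_ints**2 - sum_of_squares == 2640
assert solution(10) == 2640  # agrees with the loop-based version above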
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral string into an integer."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal Roman numeral representation of `num`."""
    numerals = ""
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def __lowercase ( __lowercase = "/p089_roman.txt" ) -> int:
'''simple docstring'''
_A = 0
with open(os.path.dirname(__lowercase ) + roman_numerals_filename ) as filea:
_A = filea.readlines()
for line in lines:
_A = line.strip()
_A = parse_roman_numerals(__lowercase )
_A = generate_roman_numerals(__lowercase )
savings += len(__lowercase ) - len(__lowercase )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 174 | 0 |
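# Round-trip sanity checks for the two converters above:
assert parse_roman_numerals("MCMXC") == 1990
assert generate_roman_numerals(1990) == "MCMXC"
assert generate_roman_numerals(parse_roman_numerals("XXXXVIIII")) == "XLIX"  # 49, minimal form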
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
a_ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
a_ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
a_ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCamelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" ),
"references": datasets.Value("string" ),
} ) , homepage="https://github.com/hendrycks/math" , codebase_urls=["https://github.com/hendrycks/math"] , )
def __UpperCamelCase ( self : str , a : Optional[int] , a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = 0.0
for i, j in zip(a , a ):
n_correct += 1.0 if math_equivalence.is_equiv(a , a ) else 0.0
SCREAMING_SNAKE_CASE : Optional[int] = n_correct / len(a )
return {
"accuracy": accuracy,
}
 | 76 |
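# Usage mirrors the docstring example above; running it requires the
# `math_equivalence` dependency from github.com/hendrycks/math:
import datasets

metric = datasets.load_metric("competition_math")
results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
assert results["accuracy"] == 1.0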
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
lowerCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
lowerCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(SCREAMING_SNAKE_CASE )-1}' )
if "norm" in key:
lowerCAmelCase = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
lowerCAmelCase = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(SCREAMING_SNAKE_CASE )-1}' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase = key.replace(F'block{idx}' , F'block.{int(SCREAMING_SNAKE_CASE )-1}' )
if "attn.q" in key:
lowerCAmelCase = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase = key.replace(F'linear_c{idx}' , F'linear_c.{int(SCREAMING_SNAKE_CASE )-1}' )
if "bot_conv" in key:
lowerCAmelCase = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
lowerCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
lowerCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
lowerCAmelCase = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
lowerCAmelCase = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
lowerCAmelCase = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
lowerCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" )
lowerCAmelCase = value
return new_state_dict
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
'''simple docstring'''
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device("""cpu""" ) )
# rename keys
lowerCAmelCase = rename_keys(SCREAMING_SNAKE_CASE )
# key and value matrices need special treatment
read_in_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# forward pass
lowerCAmelCase = model(SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
lowerCAmelCase = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 46 | 0 |
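# `read_in_k_v` above splits each fused key/value projection: the first
# `hidden_size` rows become the key weights, the remainder the value weights.
# The split in isolation:
import torch

hidden_size = 4
kv_weight = torch.randn(2 * hidden_size, hidden_size)  # fused [K; V] projection
k_weight = kv_weight[:hidden_size, :]
v_weight = kv_weight[hidden_size:, :]
assert torch.equal(torch.cat([k_weight, v_weight], dim=0), kv_weight)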
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
def __init__( self : Tuple , _a : int , _a : Tuple=13 , _a : str=7 , _a : Optional[int]=True , _a : List[str]=True , _a : Dict=True , _a : Any=True , _a : Union[str, Any]=99 , _a : Tuple=16 , _a : Tuple=36 , _a : Optional[Any]=6 , _a : Dict=6 , _a : Optional[Any]=6 , _a : List[Any]=37 , _a : Any="gelu" , _a : List[str]=0.1 , _a : Union[str, Any]=0.1 , _a : Dict=512 , _a : int=16 , _a : List[Any]=2 , _a : int=0.02 , _a : int=3 , _a : Dict=4 , _a : Union[str, Any]=None , ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =parent
_SCREAMING_SNAKE_CASE =batch_size
_SCREAMING_SNAKE_CASE =seq_length
_SCREAMING_SNAKE_CASE =is_training
_SCREAMING_SNAKE_CASE =use_input_mask
_SCREAMING_SNAKE_CASE =use_token_type_ids
_SCREAMING_SNAKE_CASE =use_labels
_SCREAMING_SNAKE_CASE =vocab_size
_SCREAMING_SNAKE_CASE =embedding_size
_SCREAMING_SNAKE_CASE =hidden_size
_SCREAMING_SNAKE_CASE =num_hidden_layers
_SCREAMING_SNAKE_CASE =num_hidden_groups
_SCREAMING_SNAKE_CASE =num_attention_heads
_SCREAMING_SNAKE_CASE =intermediate_size
_SCREAMING_SNAKE_CASE =hidden_act
_SCREAMING_SNAKE_CASE =hidden_dropout_prob
_SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE =max_position_embeddings
_SCREAMING_SNAKE_CASE =type_vocab_size
_SCREAMING_SNAKE_CASE =type_sequence_label_size
_SCREAMING_SNAKE_CASE =initializer_range
_SCREAMING_SNAKE_CASE =num_labels
_SCREAMING_SNAKE_CASE =num_choices
_SCREAMING_SNAKE_CASE =scope
def A ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE =None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : int ) -> List[str]:
'''simple docstring'''
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def A ( self : List[Any] , _a : List[str] , _a : Any , _a : Dict , _a : List[Any] , _a : Union[str, Any] , _a : Tuple , _a : Dict ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AlbertModel(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a )
_SCREAMING_SNAKE_CASE =model(_a , token_type_ids=_a )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A ( self : int , _a : Any , _a : Dict , _a : Union[str, Any] , _a : Dict , _a : int , _a : Union[str, Any] , _a : Optional[Any] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AlbertForPreTraining(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , sentence_order_label=_a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def A ( self : Optional[Any] , _a : List[str] , _a : Union[str, Any] , _a : Optional[Any] , _a : Any , _a : int , _a : List[str] , _a : str ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AlbertForMaskedLM(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Any , _a : Union[str, Any] , _a : Optional[Any] , _a : Tuple , _a : Any , _a : List[Any] , _a : List[str] , _a : List[str] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AlbertForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Union[str, Any] , _a : Dict , _a : str , _a : str , _a : int , _a : int , _a : int , _a : List[Any] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =AlbertForSequenceClassification(_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Union[str, Any] , _a : str , _a : Union[str, Any] , _a : List[str] , _a : List[str] , _a : str , _a : Dict , _a : Tuple ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =AlbertForTokenClassification(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Optional[Any] , _a : Any , _a : Any , _a : Union[str, Any] , _a : Dict , _a : int , _a : List[str] , _a : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_choices
_SCREAMING_SNAKE_CASE =AlbertForMultipleChoice(config=_a )
model.to(_a )
model.eval()
_SCREAMING_SNAKE_CASE =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE =model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
        ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) = config_and_inputs
_SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( A__ , A__ , unittest.TestCase ):
A__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
A__ = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
A__ = True
def A ( self : Tuple , _a : Optional[int] , _a : List[Any] , _a : List[str]=False ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class in get_values(_a ):
_SCREAMING_SNAKE_CASE =torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a )
_SCREAMING_SNAKE_CASE =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
return inputs_dict
def A ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AlbertModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 )
def A ( self : Optional[Any] ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : List[str] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def A ( self : Any ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def A ( self : str ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def A ( self : Dict ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def A ( self : List[Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def A ( self : int ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def A ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE =type
self.model_tester.create_and_check_model(*_a )
@slow
def A ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE =AlbertModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class A__ ( unittest.TestCase ):
@slow
def A ( self : Optional[int] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AlbertModel.from_pretrained('albert-base-v2' )
_SCREAMING_SNAKE_CASE =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_SCREAMING_SNAKE_CASE =torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(_a , attention_mask=_a )[0]
_SCREAMING_SNAKE_CASE =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _a )
_SCREAMING_SNAKE_CASE =torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1e-4 ) )
| 114 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def _lowerCAmelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Dict:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =checkpoint
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.conv_in.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.conv_in.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.conv_out.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.conv_out.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.norm_out.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['encoder.norm_out.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.conv_in.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.conv_in.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.conv_out.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.conv_out.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.norm_out.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['decoder.norm_out.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['quant_conv.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['quant_conv.bias']
_SCREAMING_SNAKE_CASE =vae_state_dict['post_quant_conv.weight']
_SCREAMING_SNAKE_CASE =vae_state_dict['post_quant_conv.bias']
# Retrieves the keys for the encoder down blocks only
_SCREAMING_SNAKE_CASE =len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
_SCREAMING_SNAKE_CASE ={
layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(_UpperCamelCase )
}
# Retrieves the keys for the decoder up blocks only
_SCREAMING_SNAKE_CASE =len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
_SCREAMING_SNAKE_CASE ={
layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(_UpperCamelCase )
}
for i in range(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =[key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
_SCREAMING_SNAKE_CASE =vae_state_dict.pop(
f"encoder.down.{i}.downsample.conv.weight" )
_SCREAMING_SNAKE_CASE =vae_state_dict.pop(
f"encoder.down.{i}.downsample.conv.bias" )
_SCREAMING_SNAKE_CASE =renew_vae_resnet_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': f"down.{i}.block", 'new': f"down_blocks.{i}.resnets"}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[key for key in vae_state_dict if 'encoder.mid.block' in key]
_SCREAMING_SNAKE_CASE =2
for i in range(1 , num_mid_res_blocks + 1 ):
_SCREAMING_SNAKE_CASE =[key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
_SCREAMING_SNAKE_CASE =renew_vae_resnet_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': f"mid.block_{i}", 'new': f"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[key for key in vae_state_dict if 'encoder.mid.attn' in key]
_SCREAMING_SNAKE_CASE =renew_vae_attention_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
conv_attn_to_linear(_UpperCamelCase )
for i in range(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =num_up_blocks - 1 - i
_SCREAMING_SNAKE_CASE =[
key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
]
if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
_SCREAMING_SNAKE_CASE =vae_state_dict[
f"decoder.up.{block_id}.upsample.conv.weight"
]
_SCREAMING_SNAKE_CASE =vae_state_dict[
f"decoder.up.{block_id}.upsample.conv.bias"
]
_SCREAMING_SNAKE_CASE =renew_vae_resnet_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': f"up.{block_id}.block", 'new': f"up_blocks.{i}.resnets"}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[key for key in vae_state_dict if 'decoder.mid.block' in key]
_SCREAMING_SNAKE_CASE =2
for i in range(1 , num_mid_res_blocks + 1 ):
_SCREAMING_SNAKE_CASE =[key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
_SCREAMING_SNAKE_CASE =renew_vae_resnet_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': f"mid.block_{i}", 'new': f"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =[key for key in vae_state_dict if 'decoder.mid.attn' in key]
_SCREAMING_SNAKE_CASE =renew_vae_attention_paths(_UpperCamelCase )
_SCREAMING_SNAKE_CASE ={'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase )
conv_attn_to_linear(_UpperCamelCase )
return new_checkpoint
def _lowerCAmelCase ( _UpperCamelCase : str , _UpperCamelCase : str , ) -> List[Any]:
"""simple docstring"""
    _SCREAMING_SNAKE_CASE =requests.get(
        'https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
_SCREAMING_SNAKE_CASE =io.BytesIO(r.content )
_SCREAMING_SNAKE_CASE =OmegaConf.load(_UpperCamelCase )
_SCREAMING_SNAKE_CASE =5_12
_SCREAMING_SNAKE_CASE ='cuda' if torch.cuda.is_available() else 'cpu'
if checkpoint_path.endswith('safetensors' ):
from safetensors import safe_open
_SCREAMING_SNAKE_CASE ={}
with safe_open(_UpperCamelCase , framework='pt' , device='cpu' ) as f:
for key in f.keys():
_SCREAMING_SNAKE_CASE =f.get_tensor(_UpperCamelCase )
else:
_SCREAMING_SNAKE_CASE =torch.load(_UpperCamelCase , map_location=_UpperCamelCase )['state_dict']
# Convert the VAE model.
_SCREAMING_SNAKE_CASE =create_vae_diffusers_config(_UpperCamelCase , image_size=_UpperCamelCase )
_SCREAMING_SNAKE_CASE =custom_convert_ldm_vae_checkpoint(_UpperCamelCase , _UpperCamelCase )
_SCREAMING_SNAKE_CASE =AutoencoderKL(**_UpperCamelCase )
vae.load_state_dict(_UpperCamelCase )
vae.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
lowerCamelCase : List[str] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
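    # Usage sketch; the script filename and both paths are assumptions:
    #   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae-diffusers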
| 114 | 1 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Optional[Any] = '''detr'''
__lowercase : Any = ['''past_key_values''']
__lowercase : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=3 , lowerCAmelCase__=1_0_0 , lowerCAmelCase__=6 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=8 , lowerCAmelCase__=6 , lowerCAmelCase__=2_0_4_8 , lowerCAmelCase__=8 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=True , lowerCAmelCase__="relu" , lowerCAmelCase__=2_5_6 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1.0 , lowerCAmelCase__=False , lowerCAmelCase__="sine" , lowerCAmelCase__="resnet50" , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__=1 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=1 , lowerCAmelCase__=1 , lowerCAmelCase__=5 , lowerCAmelCase__=2 , lowerCAmelCase__=0.1 , **lowerCAmelCase__ , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""")
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""")
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""])
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = backbone_config.get("""model_type""")
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCAmelCase__)
# set timm attributes to None
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = None, None, None
__SCREAMING_SNAKE_CASE = use_timm_backbone
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = decoder_layerdrop
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = backbone
__SCREAMING_SNAKE_CASE = use_pretrained_backbone
__SCREAMING_SNAKE_CASE = dilation
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
super().__init__(is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__)
@property
def snake_case_ ( self):
return self.encoder_attention_heads
@property
def snake_case_ ( self):
return self.d_model
@classmethod
def snake_case_ ( cls , lowerCAmelCase__ , **lowerCAmelCase__):
return cls(backbone_config=lowerCAmelCase__ , **lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__)
if output["backbone_config"] is not None:
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : List[Any] = version.parse('''1.11''' )
@property
def snake_case_ ( self):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
])
@property
def snake_case_ ( self):
return 1E-5
@property
def snake_case_ ( self):
return 1_2
| 100 |
from numpy import exp, pi, sqrt
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : float = 0.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
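    # Sanity check of the density defined above: the standard normal evaluates to
    # 1/sqrt(2*pi) ~= 0.398942 at x=0 and to exp(-1/2)/sqrt(2*pi) ~= 0.241971 at x=1.
    assert abs(_UpperCAmelCase(0.0) - 0.398942) < 1e-5
    assert abs(_UpperCAmelCase(1.0) - 0.241971) < 1e-5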
| 62 | 0 |
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def lowerCamelCase__ ( __snake_case ) -> List[Tuple[int, ...]]:
"""simple docstring"""
_UpperCamelCase = []
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
for v in tree.values():
shapes.extend(_fetch_dims(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__, (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(UpperCamelCase__ ) )
elif isinstance(UpperCamelCase__, torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('''Not supported''' )
return shapes
@torch.jit.ignore
def lowerCamelCase__ ( __snake_case, __snake_case ) -> Tuple[int, ...]:
"""simple docstring"""
_UpperCamelCase = []
for d in reversed(UpperCamelCase__ ):
idx.append(flat_idx % d )
_UpperCamelCase = flat_idx // d
return tuple(reversed(UpperCamelCase__ ) )
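# Worked example: with dims == (2, 3), flat index 5 unravels to (1, 2),
# since 1 * 3 + 2 == 5 in row-major order.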
@torch.jit.ignore
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case = None, __snake_case = None, ) -> List[Tuple[slice, ...]]:
"""simple docstring"""
    def reduce_edge_list(l ) -> None:
        tally = True
        for i in range(len(l ) ):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]
if start_edges is None:
_UpperCamelCase = [s == 0 for s in start]
reduce_edge_list(UpperCamelCase__ )
if end_edges is None:
_UpperCamelCase = [e == (d - 1) for e, d in zip(UpperCamelCase__, UpperCamelCase__ )]
reduce_edge_list(UpperCamelCase__ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(UpperCamelCase__ ) == 0:
return [()]
elif len(UpperCamelCase__ ) == 1:
return [(slice(start[0], end[0] + 1 ),)]
_UpperCamelCase = []
_UpperCamelCase = []
# Dimensions common to start and end can be selected directly
for s, e in zip(UpperCamelCase__, UpperCamelCase__ ):
if s == e:
path_list.append(slice(UpperCamelCase__, s + 1 ) )
else:
break
_UpperCamelCase = tuple(UpperCamelCase__ )
_UpperCamelCase = len(UpperCamelCase__ )
# start == end, and we're done
if divergence_idx == len(UpperCamelCase__ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCamelCase = start[divergence_idx]
return tuple(
path + (slice(UpperCamelCase__, sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :], [d - 1 for d in dims[divergence_idx + 1 :]], dims[divergence_idx + 1 :], start_edges=start_edges[divergence_idx + 1 :], end_edges=[True for _ in end_edges[divergence_idx + 1 :]], ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCamelCase = end[divergence_idx]
return tuple(
path + (slice(UpperCamelCase__, edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]], end[divergence_idx + 1 :], dims[divergence_idx + 1 :], start_edges=[True for _ in start_edges[divergence_idx + 1 :]], end_edges=end_edges[divergence_idx + 1 :], ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx], end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_UpperCamelCase = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case ) -> torch.Tensor:
"""simple docstring"""
_UpperCamelCase = t.shape[:no_batch_dims]
_UpperCamelCase = list(_flat_idx_to_idx(UpperCamelCase__, UpperCamelCase__ ) )
# _get_minimal_slice_set is inclusive
_UpperCamelCase = list(_flat_idx_to_idx(flat_end - 1, UpperCamelCase__ ) )
# Get an ordered list of slices to perform
_UpperCamelCase = _get_minimal_slice_set(
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, )
_UpperCamelCase = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case = False, __snake_case = None, __snake_case = False, ) -> Any:
"""simple docstring"""
if not (len(UpperCamelCase__ ) > 0):
raise ValueError('''Must provide at least one input''' )
_UpperCamelCase = [shape[:no_batch_dims] for shape in _fetch_dims(UpperCamelCase__ )]
_UpperCamelCase = tuple([max(UpperCamelCase__ ) for s in zip(*UpperCamelCase__ )] )
    def _prep_inputs(t ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
_UpperCamelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
_UpperCamelCase = t.reshape(-1, *t.shape[no_batch_dims:] )
else:
_UpperCamelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
_UpperCamelCase = tensor_tree_map(_prep_inputs, UpperCamelCase__ )
_UpperCamelCase = None
if _out is not None:
        _UpperCamelCase = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ), _out )
_UpperCamelCase = 1
for d in orig_batch_dims:
flat_batch_dim *= d
_UpperCamelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
    def _select_chunk(t ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
_UpperCamelCase = 0
_UpperCamelCase = prepped_outputs
for _ in range(UpperCamelCase__ ):
# Chunk the input
if not low_mem:
_UpperCamelCase = _select_chunk
else:
_UpperCamelCase = partial(
_chunk_slice, flat_start=UpperCamelCase__, flat_end=min(UpperCamelCase__, i + chunk_size ), no_batch_dims=len(UpperCamelCase__ ), )
_UpperCamelCase = tensor_tree_map(UpperCamelCase__, UpperCamelCase__ )
# Run the layer on the chunk
_UpperCamelCase = layer(**UpperCamelCase__ )
# Allocate space for the output
if out is None:
            _UpperCamelCase = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ), UpperCamelCase__ )
# Put the chunk in its pre-allocated space
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
def assign(__snake_case, __snake_case ) -> None:
for k, v in da.items():
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
assign(UpperCamelCase__, da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
_UpperCamelCase = da[k]
assign(UpperCamelCase__, UpperCamelCase__ )
elif isinstance(UpperCamelCase__, UpperCamelCase__ ):
for xa, xa in zip(UpperCamelCase__, UpperCamelCase__ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
_UpperCamelCase = xa
elif isinstance(UpperCamelCase__, torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
_UpperCamelCase = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
    _UpperCamelCase = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ), UpperCamelCase__ )
return out
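# Usage sketch for the chunked-apply helper above, assuming it is exposed under its
# upstream name chunk_layer (the defs in this module share placeholder names, so
# rename before running); it bounds peak memory by processing 128 rows at a time:
#   layer = lambda x: x * 2
#   out = chunk_layer(layer, {"x": torch.randn(1024, 8)}, chunk_size=128, no_batch_dims=1)
#   # out.shape == (1024, 8), identical to applying layer to the whole batch at once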
class _UpperCAmelCase:
def __init__( self , __a = 5_12 , ) -> List[str]:
_UpperCamelCase = max_chunk_size
_UpperCamelCase = None
_UpperCamelCase = None
def UpperCAmelCase ( self , __a , __a , __a) -> Optional[int]:
logging.info('''Tuning chunk size...''')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
_UpperCamelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
_UpperCamelCase = [c for c in candidates if c > min_chunk_size]
_UpperCamelCase = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__a) -> bool:
try:
with torch.no_grad():
fn(*__lowerCamelCase , chunk_size=__lowerCamelCase)
return True
except RuntimeError:
return False
_UpperCamelCase = 0
_UpperCamelCase = len(__lowerCamelCase) - 1
while i > min_viable_chunk_size_index:
_UpperCamelCase = test_chunk_size(candidates[i])
if not viable:
_UpperCamelCase = (min_viable_chunk_size_index + i) // 2
else:
_UpperCamelCase = i
_UpperCamelCase = (i + len(__lowerCamelCase) - 1) // 2
return candidates[min_viable_chunk_size_index]
def UpperCAmelCase ( self , __a , __a) -> Optional[int]:
_UpperCamelCase = True
for aa, aa in zip(__lowerCamelCase , __lowerCamelCase):
assert type(__lowerCamelCase) == type(__lowerCamelCase)
if isinstance(__lowerCamelCase , (list, tuple)):
consistent &= self._compare_arg_caches(__lowerCamelCase , __lowerCamelCase)
elif isinstance(__lowerCamelCase , __lowerCamelCase):
            _UpperCamelCase = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
            _UpperCamelCase = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
consistent &= self._compare_arg_caches(__lowerCamelCase , __lowerCamelCase)
else:
consistent &= aa == aa
return consistent
def UpperCAmelCase ( self , __a , __a , __a , ) -> str:
_UpperCamelCase = True
        _UpperCamelCase = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , __lowerCamelCase , __lowerCamelCase)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(__lowerCamelCase)
_UpperCamelCase = self._compare_arg_caches(self.cached_arg_data , __lowerCamelCase)
else:
            # No cached data yet, so force a fresh tuning pass
_UpperCamelCase = False
if not consistent:
_UpperCamelCase = self._determine_favorable_chunk_size(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , )
_UpperCamelCase = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 367 |
"""simple docstring"""
class _UpperCAmelCase:
def __init__( self , __a , __a , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = name
_UpperCamelCase = value
_UpperCamelCase = weight
def __repr__( self) -> List[str]:
'''simple docstring'''
return F'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
return self.value
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
return self.name
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
return self.weight
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
return self.value / self.weight
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = []
for i in range(len(__snake_case ) ):
menu.append(Things(name[i], value[i], weight[i] ) )
return menu
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = sorted(__snake_case, key=__snake_case, reverse=__snake_case )
_UpperCamelCase = []
_UpperCamelCase , _UpperCamelCase = 0.0, 0.0
for i in range(len(__snake_case ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
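# Usage sketch, assuming the upstream names Things, build_menu and greedy (the class
# and functions above carry placeholder names, so rename before running):
#   menu = build_menu(["Burger", "Pizza", "Salad"], [80.0, 100.0, 60.0], [40.0, 60.0, 20.0])
#   chosen, total_value = greedy(menu, 100.0, Things.get_value)
#   # picks Pizza (value 100, weight 60), then Burger (80, 40); Salad no longer fits,
#   # so total_value == 180.0 under the 100.0 weight budget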
def lowerCamelCase__ ( ) -> List[str]:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 100 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
lowerCamelCase__ : List[str] = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
lowerCamelCase__ : Optional[int] = {
'jukebox': 512,
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_LYRIC_TOKENS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Dict=["v3", "v2", "v2"] , _lowerCAmelCase : List[Any]=512 , _lowerCAmelCase : List[str]=5 , _lowerCAmelCase : Any="<|endoftext|>" , **_lowerCAmelCase : List[Any] , ):
SCREAMING_SNAKE_CASE_ = AddedToken(_lowerCAmelCase , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else unk_token
super().__init__(
unk_token=_lowerCAmelCase , n_genres=_lowerCAmelCase , version=_lowerCAmelCase , max_n_lyric_tokens=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = version
SCREAMING_SNAKE_CASE_ = max_n_lyric_tokens
SCREAMING_SNAKE_CASE_ = n_genres
with open(_lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ = json.load(_lowerCAmelCase )
with open(_lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ = json.load(_lowerCAmelCase )
with open(_lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ = json.load(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
        # In v2 the character vocabulary had n_vocab=80; v3 dropped "+", so n_vocab=79.
if len(self.lyrics_encoder ) == 79:
SCREAMING_SNAKE_CASE_ = oov.replace(R'\-\'' , R'\-+\'' )
SCREAMING_SNAKE_CASE_ = regex.compile(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.artists_encoder.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.genres_encoder.items()}
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.lyrics_encoder.items()}
@property
def lowerCAmelCase_ ( self : int ):
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def lowerCAmelCase_ ( self : str ):
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def lowerCAmelCase_ ( self : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict ):
SCREAMING_SNAKE_CASE_ = [self.artists_encoder.get(_lowerCAmelCase , 0 ) for artist in list_artists]
for genres in range(len(_lowerCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = [self.genres_encoder.get(_lowerCAmelCase , 0 ) for genre in list_genres[genres]]
SCREAMING_SNAKE_CASE_ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
SCREAMING_SNAKE_CASE_ = [[self.lyrics_encoder.get(_lowerCAmelCase , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : Any ):
return list(_lowerCAmelCase )
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : int , _lowerCAmelCase : Dict , _lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_for_tokenization(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self._tokenize(_lowerCAmelCase )
return artist, genre, lyrics
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : bool = False ):
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
SCREAMING_SNAKE_CASE_ = artists[idx].lower()
SCREAMING_SNAKE_CASE_ = [genres[idx].lower()]
else:
SCREAMING_SNAKE_CASE_ = self._normalize(artists[idx] ) + '.v2'
SCREAMING_SNAKE_CASE_ = [
self._normalize(_lowerCAmelCase ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
SCREAMING_SNAKE_CASE_ = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
SCREAMING_SNAKE_CASE_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
SCREAMING_SNAKE_CASE_ = {vocab[index]: index + 1 for index in range(len(_lowerCAmelCase ) )}
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = len(_lowerCAmelCase ) + 1
SCREAMING_SNAKE_CASE_ = self.vocab
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.vocab.items()}
SCREAMING_SNAKE_CASE_ = ''
else:
SCREAMING_SNAKE_CASE_ = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
SCREAMING_SNAKE_CASE_ = self._run_strip_accents(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = lyrics.replace('\\' , '\n' )
SCREAMING_SNAKE_CASE_ = self.out_of_vocab.sub('' , _lowerCAmelCase ), [], []
return artists, genres, lyrics
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : List[Any] ):
SCREAMING_SNAKE_CASE_ = unicodedata.normalize('NFD' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = []
for char in text:
SCREAMING_SNAKE_CASE_ = unicodedata.category(_lowerCAmelCase )
if cat == "Mn":
continue
output.append(_lowerCAmelCase )
return "".join(_lowerCAmelCase )
def lowerCAmelCase_ ( self : str , _lowerCAmelCase : str ):
SCREAMING_SNAKE_CASE_ = (
[chr(_lowerCAmelCase ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_lowerCAmelCase ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_lowerCAmelCase ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ['.']
)
SCREAMING_SNAKE_CASE_ = frozenset(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = re.compile(R'_+' )
SCREAMING_SNAKE_CASE_ = ''.join([c if c in accepted else '_' for c in text.lower()] )
SCREAMING_SNAKE_CASE_ = pattern.sub('_' , _lowerCAmelCase ).strip('_' )
return text
def lowerCAmelCase_ ( self : List[Any] , _lowerCAmelCase : List[str] ):
return " ".join(_lowerCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[Union[str, TensorType]] = None , _lowerCAmelCase : bool = False ):
# Convert to TensorType
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = TensorType(_lowerCAmelCase )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
SCREAMING_SNAKE_CASE_ = tf.constant
SCREAMING_SNAKE_CASE_ = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
SCREAMING_SNAKE_CASE_ = torch.tensor
SCREAMING_SNAKE_CASE_ = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
SCREAMING_SNAKE_CASE_ = jnp.array
SCREAMING_SNAKE_CASE_ = _is_jax
else:
SCREAMING_SNAKE_CASE_ = np.asarray
SCREAMING_SNAKE_CASE_ = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
SCREAMING_SNAKE_CASE_ = [inputs]
if not is_tensor(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = as_tensor(_lowerCAmelCase )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Dict , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any]="" , _lowerCAmelCase : Optional[Any]="pt" ):
SCREAMING_SNAKE_CASE_ = [0, 0, 0]
SCREAMING_SNAKE_CASE_ = [artist] * len(self.version )
SCREAMING_SNAKE_CASE_ = [genres] * len(self.version )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.tokenize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._convert_token_to_id(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [-INFINITY] * len(full_tokens[-1] )
SCREAMING_SNAKE_CASE_ = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_lowerCAmelCase )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
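    # Usage sketch for __call__ above, assuming a tokenizer instance built from local
    # vocab files (the artist/genre/lyrics strings are illustrative):
    #   enc = tokenizer("Alan Jackson", "Country Rock", "old town road")
    #   # enc["input_ids"] holds one tensor per entry in self.version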
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_lowerCAmelCase ) )
return (artists_file, genres_file, lyrics_file)
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = self.artists_decoder.get(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = [self.genres_decoder.get(_lowerCAmelCase ) for genre in genres_index]
SCREAMING_SNAKE_CASE_ = [self.lyrics_decoder.get(_lowerCAmelCase ) for character in lyric_index]
        return artist, genres, lyrics
| 225 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
lowerCamelCase__ : Union[str, Any] = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
lowerCamelCase__ : Optional[Any] = '</w>'
lowerCamelCase__ : Union[str, Any] = '@@ '
def UpperCAmelCase_ ( __UpperCAmelCase : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE_ = set()
SCREAMING_SNAKE_CASE_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
SCREAMING_SNAKE_CASE_ = char
return pairs
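# Example: UpperCAmelCase_("lower") returns the adjacent-symbol pairs
# {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}.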
# Speech2Text2 has no max input length
lowerCamelCase__ : Any = {'facebook/s2t-wav2vec2-large-en-de': 1_024}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]="<s>" , _lowerCAmelCase : Any="<pad>" , _lowerCAmelCase : List[str]="</s>" , _lowerCAmelCase : int="<unk>" , _lowerCAmelCase : Optional[Any]=False , _lowerCAmelCase : Dict=None , **_lowerCAmelCase : Tuple , ):
super().__init__(
unk_token=_lowerCAmelCase , bos_token=_lowerCAmelCase , eos_token=_lowerCAmelCase , pad_token=_lowerCAmelCase , do_lower_case=_lowerCAmelCase , **_lowerCAmelCase , )
SCREAMING_SNAKE_CASE_ = do_lower_case
with open(_lowerCAmelCase , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ = json.load(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = None
else:
with open(_lowerCAmelCase , encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE_ = merges_handle.read().split('\n' )[:-1]
SCREAMING_SNAKE_CASE_ = [tuple(merge.split()[:2] ) for merge in merges]
SCREAMING_SNAKE_CASE_ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
SCREAMING_SNAKE_CASE_ = {}
@property
def lowerCAmelCase_ ( self : List[str] ):
return len(self.decoder )
def lowerCAmelCase_ ( self : Tuple ):
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase_ ( self : Tuple , _lowerCAmelCase : Tuple ):
SCREAMING_SNAKE_CASE_ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = bigram
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = 0
while i < len(_lowerCAmelCase ):
try:
SCREAMING_SNAKE_CASE_ = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ = tuple(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ = get_pairs(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = ' '.join(_lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
SCREAMING_SNAKE_CASE_ = '\n' + BPE_TOKEN_MERGES
if word.endswith(_lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ = word.replace(_lowerCAmelCase , '' )
SCREAMING_SNAKE_CASE_ = word.replace(' ' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = word
return word
def lowerCAmelCase_ ( self : Optional[int] , _lowerCAmelCase : Optional[int] ):
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding.'
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
SCREAMING_SNAKE_CASE_ = text.lower()
SCREAMING_SNAKE_CASE_ = text.split()
SCREAMING_SNAKE_CASE_ = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(' ' ) ) )
return split_tokens
def lowerCAmelCase_ ( self : Union[str, Any] , _lowerCAmelCase : str ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : int ):
SCREAMING_SNAKE_CASE_ = self.decoder.get(_lowerCAmelCase , self.unk_token )
return result
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : List[str] ):
SCREAMING_SNAKE_CASE_ = ' '.join(_lowerCAmelCase )
# make sure @@ tokens are concatenated
SCREAMING_SNAKE_CASE_ = ''.join(string.split(_lowerCAmelCase ) )
return string
def lowerCAmelCase_ ( self : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ = os.path.join(
_lowerCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + '\n' )
SCREAMING_SNAKE_CASE_ = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_lowerCAmelCase , 'w' , encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
SCREAMING_SNAKE_CASE_ = token_index
writer.write(' '.join(_lowerCAmelCase ) + '\n' )
index += 1
        return (vocab_file, merges_file)
| 225 | 1 |
"""simple docstring"""
import sys
import turtle
def a__ ( lowerCAmelCase , lowerCAmelCase ) -> tuple[float, float]:
return (pa[0] + pa[0]) / 2, (pa[1] + pa[1]) / 2
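# Intended behavior of the midpoint helper above (note the placeholder parameter
# names collapse the two points): the midpoint of (-175, -125) and (0, 175)
# is (-87.5, 25.0).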
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ) -> None:
my_pen.up()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.down()
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
my_pen.goto(vertexa[0] , vertexa[1] )
if depth == 0:
return
triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 )
triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 )
triangle(lowerCAmelCase , get_mid(lowerCAmelCase , lowerCAmelCase ) , get_mid(lowerCAmelCase , lowerCAmelCase ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
"""Correct format for using this script: """
"""python fractals.py <int:depth_for_fractal>"""
)
_A = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor("""red""")
_A = [(-1_75, -1_25), (0, 1_75), (1_75, -1_25)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
| 166 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_A = {
"""configuration_layoutlmv3""": [
"""LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LayoutLMv3Config""",
"""LayoutLMv3OnnxConfig""",
],
"""processing_layoutlmv3""": ["""LayoutLMv3Processor"""],
"""tokenization_layoutlmv3""": ["""LayoutLMv3Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""LayoutLMv3TokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv3ForQuestionAnswering""",
"""LayoutLMv3ForSequenceClassification""",
"""LayoutLMv3ForTokenClassification""",
"""LayoutLMv3Model""",
"""LayoutLMv3PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLayoutLMv3ForQuestionAnswering""",
"""TFLayoutLMv3ForSequenceClassification""",
"""TFLayoutLMv3ForTokenClassification""",
"""TFLayoutLMv3Model""",
"""TFLayoutLMv3PreTrainedModel""",
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""LayoutLMv3FeatureExtractor"""]
_A = ["""LayoutLMv3ImageProcessor"""]
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 166 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    """simple docstring"""

    task: str = field(default="""language-modeling""", metadata={"""include_in_asdict_even_if_is_default""": True})
    input_schema: ClassVar[Features] = Features({"""text""": Value("""string""")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        '''simple docstring'''
        return {self.text_column: "text"}
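# Usage sketch (hedged): the frozen template maps a dataset's text column onto
# the canonical "text" key, e.g.
#     LanguageModeling(text_column="content").column_mapping == {"content": "text"}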
| 115 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    """simple docstring"""

    def __init__(self, args):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
    def forward(self, x):
        '''simple docstring'''
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
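# Shape sketch (hedged, assuming args.num_image_embeds == 3 so POOLING_BREAKDOWN
# yields a (3, 1) pooling grid):
#     ImageEncoder(args)(torch.randn(8, 3, 224, 224)).shape -> torch.Size([8, 3, 2048])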
class JsonlDataset(Dataset):
    """simple docstring"""

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        '''simple docstring'''
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.data )
    def __getitem__(self, index):
        '''simple docstring'''
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["""label"""]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["""img"""])).convert("""RGB""")
        image = self.transforms(image)
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies(self):
        '''simple docstring'''
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["""label"""])
        return label_freqs
def collate_fn(batch):
    '''simple docstring'''
    lens = [len(row["""sentence"""]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["""sentence"""]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["""image"""] for row in batch])
    tgt_tensor = torch.stack([row["""label"""] for row in batch])
    img_start_token = torch.stack([row["""image_start_token"""] for row in batch])
    img_end_token = torch.stack([row["""image_end_token"""] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
'''simple docstring'''
return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
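# Usage sketch (hypothetical paths/tokenizer; not part of the original script):
#     dataset = JsonlDataset("data/train.jsonl", tokenizer, get_image_transforms(),
#                            get_mmimdb_labels(), max_seq_length=512 - 3)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)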
| 115 | 1 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    '''simple docstring'''
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
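# Worked example: with start = [1, 3, 0, 5, 8, 5] and finish = [2, 4, 6, 7, 9, 9],
# the greedy scan selects activities 0, 1, 3 and 4 (each start >= the finish time
# of the previously selected activity), printing "0,1,3,4,".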
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 104 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError):
requests.request('GET' , 'https://huggingface.co' )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request('GET' , 'https://huggingface.co' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
'''simple docstring'''
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request('GET' , 'https://huggingface.co' )
def test_offline_with_datasets_offline_mode_enabled():
'''simple docstring'''
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError):
http_head('https://huggingface.co' )
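# Run sketch (hypothetical file name): `pytest -m integration test_offline_util.py`
# exercises the two network-dependent tests; the HF_DATASETS_OFFLINE test runs
# without any network access.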
| 104 | 1 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """simple docstring"""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
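# Reproducibility sketch: seeding the module RNG makes the shuffle deterministic,
# e.g. random.seed(0) before calling fisher_yates_shuffle([1, 2, 3]).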
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['''python''', '''says''', '''hello''', '''!''']
    print('''Fisher-Yates Shuffle:''')
    print('''List''', integers, strings)
    print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 306 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class VivitImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """simple docstring"""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image
    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 76 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
return params[F"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
return params[F"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    old = traverse_util.flatten_dict(variables["""target"""])
    old = {"""/""".join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = """encoder/encoder/mlp/wi_0/kernel""" in old
    print("""Split MLP:""", split_mlp_wi)
    new = collections.OrderedDict()
    # Target keys below follow the standard Hugging Face T5 state-dict layout.
    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T
    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]
    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                    old, i, "decoder"
                ).T
        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["""shared.weight"""]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["""shared.weight"""]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("""Using shared word embeddings as lm_head.""")
            state_dict["lm_head.weight"] = state_dict["""shared.weight"""]
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("""Done""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
    )
| 306 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"""shortest_edge""": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="""crop_size""")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["""shortest_edge"""], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["""height"""], size["""width"""]), data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="""size""", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="""crop_size""", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 306 | 1 |
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''')
        plt.ylabel('''Function values''')
        plt.show()
    return best_state
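# Acceptance sketch: a worsening move with change = -2 at temperature 100 is kept
# with probability e^(-2/100) ~= 0.98, but at temperature 1 only with e^(-2) ~= 0.135,
# which is what lets the search settle as it cools.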
if __name__ == "__main__":
    def test_f1(x, y):
        return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
F"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"{local_min.score()}"
)
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
F"{local_min.score()}"
    )
| 6 |
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 6 | 1 |
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    '''simple docstring'''

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'''
                f''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''')
        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )
        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None
        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)
        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states
        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states
        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)
        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''')
            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)
        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        hidden_states = ff_output + hidden_states
        return hidden_states
class FeedForward(nn.Module):
    '''simple docstring'''

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)
        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    '''simple docstring'''

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    '''simple docstring'''

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    '''simple docstring'''

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    '''simple docstring'''

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    '''simple docstring'''

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1E-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    '''simple docstring'''

    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1E-5):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
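# Smoke-test sketch (hedged; shapes only):
#     block = BasicTransformerBlock(dim=64, num_attention_heads=4, attention_head_dim=16)
#     block(torch.randn(2, 128, 64)).shape  # -> torch.Size([2, 128, 64])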
| 361 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    """simple docstring"""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
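# e.g. get_efficientnet_config("b0") (hedged) yields a config with hidden_dim=1280,
# image_size=224 and the ImageNet-1k id2label/label2id maps attached.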
def prepare_img():
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    """simple docstring"""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    """simple docstring"""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    # Classifier head mapping (assumed TF parameter names from the upstream script).
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original Keras model's weights into our EfficientNet structure."""
    # Load the original model (keyword values restored from the upstream conversion script)
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        repo_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(repo_name)
        hf_model.push_to_hub(repo_name)
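# Example invocation (flags defined below; script name and values are illustrative):
#   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model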
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319 | 0 |
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
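    """Sort a list in place by bubbling in both directions on each pass.

    Example (added for illustration; picked up by doctest.testmod() below):
    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    """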
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(f"""{cocktail_shaker_sort(unsorted) = }""")
| 60 |
"""simple docstring"""
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")
def encrypt_message(key: int, message: str) -> str:
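    """Read the message down the columns of a `key`-column grid.

    Example (added for illustration; hand-checked):
    >>> encrypt_message(6, 'Hello World')
    'HWeolrllod '
    """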
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
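    """Invert encrypt_message by refilling the grid row by row.

    Example (added for illustration; hand-checked):
    >>> decrypt_message(6, 'HWeolrllod ')
    'Hello World'
    """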
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 60 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class __a ( A__ ):
_lowerCAmelCase : str = VOCAB_FILES_NAMES
_lowerCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase : Dict = ['''input_ids''', '''attention_mask''']
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
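        # Note (added): sentencepiece ids are shifted up by fairseq_offset (4) in
        # _convert_token_to_id below, so ids 0-3 stay reserved for the fairseq specials.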
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self) -> int:
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentencepiece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 196 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 196 | 1 |
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' , [
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' , [
DatasetInfo(),
DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0", features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(), task_templates=[], builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337, post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' , [
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 255 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/mbart-large-en-ro''': '''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json''',
'''facebook/mbart-large-cc25''': '''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, **kwargs) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
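    # Note (added): mBART formats sequences as "tokens </s> <lang_code>" -- the language
    # code follows EOS, so prefix_tokens stays empty and suffix_tokens carries both ids.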
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 19 | 0 |
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None):
    """Convert an XLNet TF checkpoint into a PyTorch model, then save weights and config."""
    config = XLNetConfig.from_json_file(xlnet_config_file)
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
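# Example invocation (script name and paths are illustrative; flags defined below):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py --tf_checkpoint_path ckpt \
#       --xlnet_config_file config.json --pytorch_dump_folder_path out --finetuning_task sts-b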
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 48 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    """Configuration class for WavLM models."""

    model_type = "wavlm"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, num_buckets=320, max_bucket_distance=800, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, tdnn_dim=(512, 512, 512, 512, 1500), tdnn_kernel=(5, 3, 3, 1, 1), tdnn_dilation=(1, 2, 3, 1, 1), xvector_output_dim=512, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, add_adapter=False, adapter_kernel_size=3, adapter_stride=2, num_adapter_layers=3, output_hidden_size=None, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
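        # Note (added): the feature extractor downsamples audio by the product of the
        # conv strides (5 * 2**6 = 320 input samples per output frame with the defaults),
        # which is exactly what inputs_to_logits_ratio below computes.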
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 48 | 1 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30522, type=int)
    args = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
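    # Spread the per-token counts into a dense, vocab-sized list indexed by token id.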
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 39 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_DECODE_TYPES = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)  # constant name assumed
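# Note (added): MGP-STR decodes every image with three heads (character, BPE and
# wordpiece) and keeps the highest-confidence string; see batch_decode below.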
class MgpstrProcessor(ProcessorMixin):
    """Wraps a ViT image processor and an MGP-STR character tokenizer into a single processor."""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores
    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 140 | 0 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
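# Illustrative README layout handled above (YAML front matter between "---" fences):
#   ---
#   dataset_info:
#     dataset_size: 42
#   ---
#   Free-form markdown content...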
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {  # variable name assumed; the mapping itself is unchanged and unused below
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 353 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(self, parent, vocab_size=100, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, out_indices=[0, 1, 2, 3]):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def a_ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def a_ ( self : Any ) -> int:
'''simple docstring'''
pass
def a_ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_A = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def a_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(a__ )
_A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_A = [*signature.parameters.keys()]
_A = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a__ )
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
def a_ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(a__ ), BeitForMaskedImageModeling]:
continue
_A = model_class(a__ )
model.to(a__ )
model.train()
_A = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_A = model(**a__ ).loss
loss.backward()
def a_ ( self : List[str] ) -> Dict:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_A = False
_A = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(a__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_A = model_class(a__ )
model.gradient_checkpointing_enable()
model.to(a__ )
model.train()
_A = self._prepare_for_class(a__ , a__ , return_labels=a__ )
_A = model(**a__ ).loss
loss.backward()
def a_ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
_A = _config_zero_init(a__ )
for model_class in self.all_model_classes:
_A = model_class(config=a__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def a_ ( self : List[str] ) -> int:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = BeitModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase):
@cached_property
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def a_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_A = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(a__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=a__ , return_tensors="pt" ).pixel_values.to(a__ )
# prepare bool_masked_pos
_A = torch.ones((1, 1_96) , dtype=torch.bool ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(pixel_values=a__ , bool_masked_pos=a__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , a__ )
_A = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(a__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , a__ , atol=1E-2 ) )
@slow
def a_ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_A = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(a__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , a__ )
_A = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(a__ )
self.assertTrue(torch.allclose(logits[0, :3] , a__ , atol=1E-4 ) )
_A = 2_81
self.assertEqual(logits.argmax(-1 ).item() , a__ )
@slow
def a_ ( self : List[Any] ) -> int:
'''simple docstring'''
_A = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
a__ )
_A = self.default_image_processor
_A = prepare_img()
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , a__ )
_A = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(a__ )
self.assertTrue(torch.allclose(logits[0, :3] , a__ , atol=1E-4 ) )
_A = 23_96
self.assertEqual(logits.argmax(-1 ).item() , a__ )
@slow
def a_ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
_A = model.to(a__ )
_A = BeitImageProcessor(do_resize=a__ , size=6_40 , do_center_crop=a__ )
_A = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_A = Image.open(ds[0]["file"] )
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits
# verify the logits
_A = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , a__ )
_A = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
_A = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=a__ , )
else:
_A = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=a__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , a__ , atol=1E-4 ) )
@slow
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
_A = model.to(a__ )
_A = BeitImageProcessor(do_resize=a__ , size=6_40 , do_center_crop=a__ )
_A = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_A = Image.open(ds[0]["file"] )
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits.detach().cpu()
_A = image_processor.post_process_semantic_segmentation(outputs=a__ , target_sizes=[(5_00, 3_00)] )
_A = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , a__ )
_A = image_processor.post_process_semantic_segmentation(outputs=a__ )
_A = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , a__ )
| 163 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 72 |
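The `_LazyModule` registration above defers the heavy modeling imports until an attribute is first touched. A minimal sketch of the consumer side, assuming the usual `transformers` package layout:

# Importing the package does not import modeling_pegasus_x yet.
from transformers.models import pegasus_x

config = pegasus_x.PegasusXConfig()  # first attribute access triggers the real import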
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def convert_volume(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume from one unit to another, routing through cubic metres."""
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 147 | 0 |
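A quick sanity check of the converter above (using the cleaned-up `convert_volume` name): every conversion passes through cubic metres, so litre-to-gallon multiplies by 0.001 and then by 264.172.

assert convert_volume(1, "cubicmeter", "litre") == 1000
assert abs(convert_volume(500, "litre", "gallon") - 500 * 0.001 * 264.172) < 1e-9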
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    """Configuration class for TimeSformer video models."""

    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 68 |
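A minimal usage sketch for the config above; the field names follow the cleaned-up `__init__`, and any checkpoint interaction is omitted:

config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
assert config.hidden_size == 768 and config.num_frames == 16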
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ):
_UpperCAmelCase : str = tau * frequency / samplerate
_UpperCAmelCase : int = sin(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = cos(UpperCamelCase__ )
_UpperCAmelCase : Any = _sin / (2 * q_factor)
_UpperCAmelCase : Any = (1 - _cos) / 2
_UpperCAmelCase : Tuple = 1 - _cos
_UpperCAmelCase : List[str] = 1 + alpha
_UpperCAmelCase : Union[str, Any] = -2 * _cos
_UpperCAmelCase : Optional[Any] = 1 - alpha
_UpperCAmelCase : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ):
_UpperCAmelCase : List[str] = tau * frequency / samplerate
_UpperCAmelCase : Dict = sin(UpperCamelCase__ )
_UpperCAmelCase : Dict = cos(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = _sin / (2 * q_factor)
_UpperCAmelCase : Dict = (1 + _cos) / 2
_UpperCAmelCase : Dict = -1 - _cos
_UpperCAmelCase : Optional[Any] = 1 + alpha
_UpperCAmelCase : str = -2 * _cos
_UpperCAmelCase : Union[str, Any] = 1 - alpha
_UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ):
_UpperCAmelCase : List[Any] = tau * frequency / samplerate
_UpperCAmelCase : Optional[int] = sin(UpperCamelCase__ )
_UpperCAmelCase : Dict = cos(UpperCamelCase__ )
_UpperCAmelCase : str = _sin / (2 * q_factor)
_UpperCAmelCase : Tuple = _sin / 2
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : Dict = -ba
_UpperCAmelCase : str = 1 + alpha
_UpperCAmelCase : List[str] = -2 * _cos
_UpperCAmelCase : str = 1 - alpha
_UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float = 1 / sqrt(2 ) ):
_UpperCAmelCase : Tuple = tau * frequency / samplerate
_UpperCAmelCase : Dict = sin(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = cos(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
_UpperCAmelCase : Optional[Any] = 1 - alpha
_UpperCAmelCase : Optional[int] = -2 * _cos
_UpperCAmelCase : str = 1 + alpha
_UpperCAmelCase : Tuple = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : float = 1 / sqrt(2 ) , ):
_UpperCAmelCase : List[str] = tau * frequency / samplerate
_UpperCAmelCase : Union[str, Any] = sin(UpperCamelCase__ )
_UpperCAmelCase : int = cos(UpperCamelCase__ )
_UpperCAmelCase : Dict = _sin / (2 * q_factor)
_UpperCAmelCase : int = 10 ** (gain_db / 40)
_UpperCAmelCase : Union[str, Any] = 1 + alpha * big_a
_UpperCAmelCase : int = -2 * _cos
_UpperCAmelCase : Any = 1 - alpha * big_a
_UpperCAmelCase : Dict = 1 + alpha / big_a
_UpperCAmelCase : str = -2 * _cos
_UpperCAmelCase : Union[str, Any] = 1 - alpha / big_a
_UpperCAmelCase : int = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : float = 1 / sqrt(2 ) , ):
_UpperCAmelCase : str = tau * frequency / samplerate
_UpperCAmelCase : List[Any] = sin(UpperCamelCase__ )
_UpperCAmelCase : Union[str, Any] = cos(UpperCamelCase__ )
_UpperCAmelCase : Dict = _sin / (2 * q_factor)
_UpperCAmelCase : List[str] = 10 ** (gain_db / 40)
_UpperCAmelCase : int = (big_a + 1) - (big_a - 1) * _cos
_UpperCAmelCase : List[str] = (big_a + 1) + (big_a - 1) * _cos
_UpperCAmelCase : List[Any] = (big_a - 1) - (big_a + 1) * _cos
_UpperCAmelCase : Tuple = (big_a - 1) + (big_a + 1) * _cos
_UpperCAmelCase : Optional[int] = 2 * sqrt(UpperCamelCase__ ) * alpha
_UpperCAmelCase : Optional[Any] = big_a * (pmc + aaa)
_UpperCAmelCase : List[Any] = 2 * big_a * mpc
_UpperCAmelCase : Any = big_a * (pmc - aaa)
_UpperCAmelCase : Union[str, Any] = ppmc + aaa
_UpperCAmelCase : Dict = -2 * pmpc
_UpperCAmelCase : str = ppmc - aaa
_UpperCAmelCase : Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : float = 1 / sqrt(2 ) , ):
_UpperCAmelCase : Tuple = tau * frequency / samplerate
_UpperCAmelCase : Dict = sin(UpperCamelCase__ )
_UpperCAmelCase : str = cos(UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = _sin / (2 * q_factor)
_UpperCAmelCase : str = 10 ** (gain_db / 40)
_UpperCAmelCase : Any = (big_a + 1) - (big_a - 1) * _cos
_UpperCAmelCase : Dict = (big_a + 1) + (big_a - 1) * _cos
_UpperCAmelCase : Union[str, Any] = (big_a - 1) - (big_a + 1) * _cos
_UpperCAmelCase : Dict = (big_a - 1) + (big_a + 1) * _cos
_UpperCAmelCase : Union[str, Any] = 2 * sqrt(UpperCamelCase__ ) * alpha
_UpperCAmelCase : str = big_a * (ppmc + aaa)
_UpperCAmelCase : List[str] = -2 * big_a * pmpc
_UpperCAmelCase : Any = big_a * (ppmc - aaa)
_UpperCAmelCase : str = pmc + aaa
_UpperCAmelCase : Any = 2 * mpc
_UpperCAmelCase : Tuple = pmc - aaa
_UpperCAmelCase : Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 68 | 1 |
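A minimal usage sketch for the biquad factories above. It assumes `IIRFilter` exposes a per-sample `process()` method, as in TheAlgorithms' `audio_filters.iir_filter` module imported above; treat that as an assumption here:

filt = make_lowpass(1000, 48000)  # 1 kHz cutoff at a 48 kHz sample rate
samples = [0.0, 1.0, 0.5, -0.5]
filtered = [filt.process(s) for s in samples]  # assumed per-sample API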
'''simple docstring'''
def catalan_numbers(upper_limit: int) -> list:
    """Return the Catalan numbers C(0) .. C(upper_limit) via dynamic programming."""
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list


if __name__ == "__main__":
    print("\n********* Catalan Numbers Using Dynamic Programming ************\n")
    print("\n*** Enter -1 at any time to quit ***")
    print("\nEnter the upper limit (≥ 0) for the Catalan number sequence: ", end="")
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print("\n********* Goodbye!! ************")
                break
            else:
                print(f"The Catalan numbers from 0 through {N} are:")
                print(catalan_numbers(N))
                print("Try another upper limit for the sequence: ", end="")
    except (NameError, ValueError):
        print("\n********* Invalid input, goodbye! ************\n")
import doctest
doctest.testmod() | 190 |
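An independent cross-check of the DP above against the closed form C(n) = binom(2n, n) / (n + 1); `math.comb` needs Python 3.8+:

from math import comb

assert catalan_numbers(10) == [comb(2 * n, n) // (n + 1) for n in range(11)]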
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape an Amazon.in search results page for a product into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find("span", attrs={"class": "a-price a-text-price"}).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 204 | 0 |
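The discount column computed above is just percentage off MRP, (MRP - price) / MRP * 100; for example, an MRP of ₹1,000 and a price of ₹750 comes out as a 25.0% discount:

mrp, price = 1000.0, 750.0
assert (mrp - price) / mrp * 100 == 25.0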
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
| 352 |
'''simple docstring'''
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest element (1-indexed) of lst using quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 242 | 0 |
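A usage sketch for the quickselect above. The partition keeps strictly smaller and strictly larger elements, so duplicates of the pivot are dropped; the helper assumes distinct inputs, and k is 1-indexed:

assert kth_number([25, 21, 98, 100, 76], 1) == 21  # minimum
assert kth_number([2, 5, 1, 4, 3], 3) == 3         # median of five distinct values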
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original TrOCR weights into our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
A : Dict = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A : List[str] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 118 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging

logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    """Configuration class for RetriBERT models."""

    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 118 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}


class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of GIT models."""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    """Configuration for GIT models."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 233 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    """Configuration class for CANINE models."""

    model_type = "canine"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=16_384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=16_384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 233 | 1 |
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 22 |
"""simple docstring"""
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
if __name__ == "__main__":
print(F'''{solution() = }''')
| 220 | 0 |
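A quick check of the cleaned-up `solution` above: every third Fibonacci number is even (2, 8, 34, ...), and for the default 4,000,000 limit the sum is the well-known 4,613,732:

assert solution(10) == 2 + 8
assert solution(4_000_000) == 4_613_732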
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
    },
    "spm_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_config_file": {
        "facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
        "facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/m2m100_418M": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
    "m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
    "wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
# fmt: on


class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
@property
def __lowercase ( self : List[Any] ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __lowercase ( self : Union[str, Any] ):
return self._src_lang
@src_lang.setter
def __lowercase ( self : List[Any] ,_UpperCAmelCase : str ):
_a : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : str ):
return self.sp_model.encode(_UpperCAmelCase ,out_type=_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : int ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(_UpperCAmelCase ,self.encoder[self.unk_token] )
def __lowercase ( self : Tuple ,_UpperCAmelCase : int ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(_UpperCAmelCase ,self.unk_token )
def __lowercase ( self : List[str] ,_UpperCAmelCase : Optional[int] ):
_a : Tuple = []
_a : List[Any] = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
_a : Tuple = []
else:
current_sub_tokens.append(_UpperCAmelCase )
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ,_UpperCAmelCase : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase ,token_ids_a=_UpperCAmelCase ,already_has_special_tokens=_UpperCAmelCase )
_a : List[str] = [1] * len(self.prefix_tokens )
_a : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def __lowercase ( self : Tuple ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __lowercase ( self : Union[str, Any] ):
_a : Any = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[Any] ):
_a : Optional[Any] = self.__dict__.copy()
_a : Any = None
return state
def __setstate__( self : Optional[Any] ,_UpperCAmelCase : Dict ):
_a : str = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
_a : Union[str, Any] = {}
_a : List[str] = load_spm(self.spm_file ,self.sp_model_kwargs )
def save_vocabulary( self : str ,save_directory : str ,filename_prefix : Optional[str] = None ):
save_dir = Path(save_directory )
if not save_dir.is_dir():
raise OSError(F"""{save_directory} should be a directory""" )
vocab_save_path = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
spm_save_path = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder ,vocab_save_path )
if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file ,spm_save_path )
elif not os.path.isfile(self.spm_file ):
with open(spm_save_path ,'wb' ) as fi:
content_spiece_model = self.sp_model.serialized_model_proto()
fi.write(content_spiece_model )
return (str(vocab_save_path ), str(spm_save_path ))
def prepare_seq2seq_batch( self : Optional[int] ,src_texts : List[str] ,src_lang : str = "en" ,tgt_texts : Optional[List[str]] = None ,tgt_lang : str = "ro" ,**kwargs : int ,):
self.src_lang = src_lang
self.tgt_lang = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seq2seq_batch(src_texts ,tgt_texts ,**kwargs )
def _build_translation_inputs( self : Optional[int] ,raw_inputs : List[str] ,src_lang : Optional[str] ,tgt_lang : Optional[str] ,**extra_kwargs : Dict ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
self.src_lang = src_lang
inputs = self(raw_inputs ,add_special_tokens=True ,**extra_kwargs )
tgt_lang_id = self.get_lang_id(tgt_lang )
inputs['forced_bos_token_id'] = tgt_lang_id
return inputs
def _switch_to_input_mode( self : Dict ):
self.set_src_lang_special_tokens(self.src_lang )
def _switch_to_target_mode( self : Dict ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def set_src_lang_special_tokens( self : Dict ,src_lang : str ):
lang_token = self.get_lang_token(src_lang )
self.cur_lang_id = self.lang_token_to_id[lang_token]
self.prefix_tokens = [self.cur_lang_id]
self.suffix_tokens = [self.eos_token_id]
def set_tgt_lang_special_tokens( self : Optional[Any] ,tgt_lang : str ):
lang_token = self.get_lang_token(tgt_lang )
self.cur_lang_id = self.lang_token_to_id[lang_token]
self.prefix_tokens = [self.cur_lang_id]
self.suffix_tokens = [self.eos_token_id]
def get_lang_token( self : str ,lang : str ):
return self.lang_code_to_token[lang]
def get_lang_id( self : List[Any] ,lang : str ):
lang_token = self.get_lang_token(lang )
return self.lang_token_to_id[lang_token]
def load_spm ( path : str ,sp_model_kwargs : Dict ) -> sentencepiece.SentencePieceProcessor:
spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
spm.Load(str(path ) )
return spm
def load_json ( path : str ) -> Union[Dict, List]:
with open(path , 'r' ) as f:
return json.load(f )
def save_json ( data ,path : str ) -> None:
with open(path , 'w' ) as f:
json.dump(data , f , indent=2 )
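# A minimal usage sketch of the tokenizer above (illustrative; the checkpoint name and
# language codes are assumptions, not taken from this file). Setting `src_lang` prepends
# that language's token via set_src_lang_special_tokens, and the translation-input builder
# records the target language id as `forced_bos_token_id` for generation.
if __name__ == "__main__":
    from transformers import M2M100Tokenizer

    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M" ,src_lang="en" ,tgt_lang="fr" )
    encoded = tokenizer("Hello world" ,return_tensors="pt" )
    print(encoded["input_ids"] )  # starts with the __en__ language id, ends with </s>
    print(tokenizer.get_lang_id("fr" ) )  # id used as forced_bos_token_id when generating French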
| 107 |
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester :
def __init__( self : Any ,parent : Union[str, Any] ,batch_size : Optional[int]=2 ,image_size : Tuple=32 ,patch_size : Dict=16 ,num_channels : Optional[Any]=3 ,is_training : Tuple=True ,use_labels : Dict=True ,hidden_size : str=32 ,num_hidden_layers : Tuple=4 ,backbone_out_indices : List[Any]=[0, 1, 2, 3] ,num_attention_heads : List[str]=4 ,intermediate_size : Any=37 ,hidden_act : List[Any]="gelu" ,hidden_dropout_prob : Optional[int]=0.1 ,attention_probs_dropout_prob : Tuple=0.1 ,initializer_range : Optional[Any]=0.02 ,num_labels : Optional[Any]=3 ,backbone_featmap_shape : List[str]=[1, 384, 24, 24] ,is_hybrid : Union[str, Any]=True ,scope : List[str]=None ,):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.is_training = is_training
self.use_labels = use_labels
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.backbone_out_indices = backbone_out_indices
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.initializer_range = initializer_range
self.num_labels = num_labels
self.backbone_featmap_shape = backbone_featmap_shape
self.scope = scope
self.is_hybrid = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
num_patches = (image_size // patch_size) ** 2
self.seq_length = num_patches + 1
def prepare_config_and_inputs( self : Dict ):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
config = self.get_config()
return config, pixel_values, labels
def get_config( self : int ):
backbone_config = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
return DPTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=False ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=backbone_config ,backbone_featmap_shape=self.backbone_featmap_shape ,)
def create_and_check_model( self : Any ,config : str ,pixel_values : int ,labels : Optional[Any] ):
model = DPTModel(config=config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def create_and_check_for_depth_estimation( self : List[Any] ,config : List[Any] ,pixel_values : str ,labels : Any ):
config.num_labels = self.num_labels
model = DPTForDepthEstimation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values )
self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size) )
def create_and_check_for_semantic_segmentation( self : str ,config : Tuple ,pixel_values : int ,labels : List[Any] ):
config.num_labels = self.num_labels
model = DPTForSemanticSegmentation(config )
model.to(torch_device )
model.eval()
result = model(pixel_values ,labels=labels )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def prepare_config_and_inputs_for_common( self : List[Any] ):
config_and_inputs = self.prepare_config_and_inputs()
config , pixel_values , labels = config_and_inputs
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
pipeline_model_mapping = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
test_pruning = False
test_resize_embeddings = False
test_head_masking = False
def setUp( self : Union[str, Any] ):
self.model_tester = DPTModelTester(self )
self.config_tester = ConfigTester(self ,config_class=DPTConfig ,has_text_modality=False ,hidden_size=37 )
def test_config( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def test_inputs_embeds( self : Tuple ):
pass
def test_model_common_attributes( self : List[str] ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x ,nn.Linear ) )
def test_forward_signature( self : Any ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] ,expected_arg_names )
def test_model( self : str ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs )
def test_for_depth_estimation( self : int ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs )
def test_for_semantic_segmentation( self : str ):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
def test_training( self : Union[str, Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
if model_class in get_values(MODEL_MAPPING ):
continue
model = model_class(config )
model.to(torch_device )
model.train()
inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def test_training_gradient_checkpointing( self : Union[str, Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.use_cache = False
config.return_dict = True
if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
continue
model = model_class(config )
model.to(torch_device )
model.gradient_checkpointing_enable()
model.train()
inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
loss = model(**inputs ).loss
loss.backward()
def test_initialization( self : str ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
configs_no_init = _config_zero_init(config )
for model_class in self.all_model_classes:
model = model_class(config=configs_no_init )
# Skip the check for the backbone
backbone_params = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
backbone_params = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : Optional[Any] ):
pass
@slow
def test_model_from_pretrained( self : Union[str, Any] ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
model = DPTModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def test_raise_readout_type( self : Any ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.readout_type = 'add'
with self.assertRaises(ValueError ):
_ = DPTForDepthEstimation(config )
def prepare_img ( ) -> Optional[Any]:
image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest ( unittest.TestCase ):
def test_inference_depth_estimation( self : Any ):
image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(torch_device )
image = prepare_img()
inputs = image_processor(images=image ,return_tensors='pt' ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
predicted_depth = outputs.predicted_depth
# verify the predicted depth
expected_shape = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape ,expected_shape )
expected_slice = torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(torch_device )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 ,expected_slice ,atol=1E-4 ) )
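# A standalone sketch of the check above, outside unittest (illustrative; it mirrors the
# slow test rather than adding behavior, and needs the Intel/dpt-hybrid-midas weights):
#
# processor = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
# model = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' )
# inputs = processor(images=prepare_img() ,return_tensors='pt' )
# with torch.no_grad():
#     depth = model(**inputs ).predicted_depth  # shape (1, 384, 384) for a 384x384 input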
| 107 | 1 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path ( test_file ) -> Union[str, Any]:
components = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F"""{test_file} instead.""" )
test_fn = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F"""`test_file` should be a python file. Got {test_fn} instead.""" )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F"""`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.""" )
components = components[:-1] + [test_fn.replace(".py" ,"" )]
test_module_path = '''.'''.join(components )
return test_module_path
def get_test_module ( test_file ) -> str:
test_module_path = get_module_path(test_file )
test_module = importlib.import_module(test_module_path )
return test_module
def get_tester_classes ( test_file ) -> List[Any]:
tester_classes = []
test_module = get_test_module(test_file )
for attr in dir(test_module ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(test_module ,attr ) )
# sort with class names
return sorted(tester_classes ,key=lambda x : x.__name__ )
def get_test_classes ( test_file ) -> List[Any]:
test_classes = []
test_module = get_test_module(test_file )
for attr in dir(test_module ):
attr_value = getattr(test_module ,attr )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
model_classes = getattr(attr_value ,"all_model_classes" ,[] )
if len(model_classes ) > 0:
test_classes.append(attr_value )
# sort with class names
return sorted(test_classes ,key=lambda x : x.__name__ )
def get_model_classes ( test_file ) -> Tuple:
test_classes = get_test_classes(test_file )
model_classes = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(model_classes ,key=lambda x : x.__name__ )
def get_model_tester_from_test_class ( test_class ) -> Optional[Any]:
test = test_class()
if hasattr(test ,"setUp" ):
test.setUp()
model_tester = None
if hasattr(test ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
model_tester = test.model_tester.__class__
return model_tester
def get_test_classes_for_model ( test_file ,model_class ) -> List[Any]:
test_classes = get_test_classes(test_file )
target_test_classes = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(test_class )
# sort with class names
return sorted(target_test_classes ,key=lambda x : x.__name__ )
def get_tester_classes_for_model ( test_file ,model_class ) -> List[Any]:
test_classes = get_test_classes_for_model(test_file ,model_class )
tester_classes = []
for test_class in test_classes:
tester_class = get_model_tester_from_test_class(test_class )
if tester_class is not None:
tester_classes.append(tester_class )
# sort with class names
return sorted(tester_classes ,key=lambda x : x.__name__ )
def get_test_to_tester_mapping ( test_file ) -> Dict:
test_classes = get_test_classes(test_file )
test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
return test_tester_mapping
def get_model_to_test_mapping ( test_file ) -> Optional[Any]:
model_classes = get_model_classes(test_file )
model_test_mapping = {
model_class: get_test_classes_for_model(test_file ,model_class ) for model_class in model_classes
}
return model_test_mapping
def get_model_to_tester_mapping ( test_file ) -> Dict:
model_classes = get_model_classes(test_file )
model_to_tester_mapping = {
model_class: get_tester_classes_for_model(test_file ,model_class ) for model_class in model_classes
}
return model_to_tester_mapping
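# A usage sketch for the introspection helpers above (illustrative; the test-file path is
# an assumption and must exist inside a transformers checkout for this to run):
#
# test_file = os.path.join("tests" ,"models" ,"bert" ,"test_modeling_bert.py" )
# test_classes = get_test_classes(test_file )                # e.g. [BertModelTest, ...]
# model_to_tester = get_model_to_tester_mapping(test_file )  # {BertModel: [BertModelTester], ...}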
def to_json ( o ) -> Optional[int]:
if isinstance(o ,str ):
return o
elif isinstance(o ,type ):
return o.__name__
elif isinstance(o ,(list, tuple) ):
return [to_json(x ) for x in o]
elif isinstance(o ,dict ):
return {to_json(k ): to_json(v ) for k, v in o.items()}
else:
return o | 269 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase):
def __init__( self : int , parent : Tuple , batch_size : Union[str, Any]=13 , num_channels : Optional[Any]=3 , image_size : List[Any]=2_24 , min_resolution : Tuple=30 , max_resolution : List[str]=4_00 , do_resize : Optional[Any]=True , size : Optional[int]=None , do_normalize : int=True , image_mean : Any=[0.5, 0.5, 0.5] , image_std : Tuple=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
size = size if size is not None else {'''height''': 18, '''width''': 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict( self : Optional[Any] ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase):
image_processing_class = ViTImageProcessor if is_vision_available() else None
def setUp( self : Dict ):
"""simple docstring"""
self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
def image_processor_dict( self : List[Any] ):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def test_image_proc_properties( self : List[Any] ):
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(image_processor , '''image_mean''' ) )
self.assertTrue(hasattr(image_processor , '''image_std''' ) )
self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
self.assertTrue(hasattr(image_processor , '''do_resize''' ) )
self.assertTrue(hasattr(image_processor , '''size''' ) )
def test_batch_feature( self : str ):
"""simple docstring"""
pass
def test_call_pil( self : Optional[Any] ):
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(image , Image.Image )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def test_call_numpy( self : List[Any] ):
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
for image in image_inputs:
self.assertIsInstance(image , np.ndarray )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
def test_call_pytorch( self : str ):
"""simple docstring"""
image_processor = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
for image in image_inputs:
self.assertIsInstance(image , torch.Tensor )
# Test not batched input
encoded_images = image_processor(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
# Test batched
encoded_images = image_processor(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size['''height'''],
self.image_proc_tester.size['''width'''],
) , )
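# The three call tests above share one pattern (sketch, using names from this file):
# build the processor from the tester's dict, feed PIL / numpy / torch inputs, and check
# that pixel_values comes back as (batch, num_channels, size['height'], size['width']).
#
# processor = self.image_processing_class(**self.image_processor_dict )
# pixel_values = processor(image_inputs , return_tensors='''pt''' ).pixel_values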
| 167 | 0 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__a = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotImageClassificationPipeline ( Pipeline ):
"""simple docstring"""
def __init__( self : str , **kwargs : List[str] ):
super().__init__(**kwargs )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , images : Union[str, List[str], "Image", List["Image"]] , **kwargs : Union[str, Any] ):
return super().__call__(images , **kwargs )
def _sanitize_parameters( self : Tuple , **kwargs : List[Any] ):
preprocess_params = {}
if "candidate_labels" in kwargs:
preprocess_params["""candidate_labels"""] = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
preprocess_params["""hypothesis_template"""] = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def preprocess( self : Any , image : str , candidate_labels : int=None , hypothesis_template : List[Any]="This is a photo of {}." ):
image = load_image(image )
inputs = self.image_processor(images=[image] , return_tensors=self.framework )
inputs["""candidate_labels"""] = candidate_labels
sequences = [hypothesis_template.format(x ) for x in candidate_labels]
text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
inputs["""text_inputs"""] = [text_inputs]
return inputs
def _forward( self : Union[str, Any] , model_inputs : Optional[int] ):
candidate_labels = model_inputs.pop("""candidate_labels""" )
text_inputs = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , UserDict ):
text_inputs = text_inputs[0]
else:
# Batching case.
text_inputs = text_inputs[0][0]
outputs = self.model(**text_inputs , **model_inputs )
model_outputs = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def postprocess( self : Optional[int] , model_outputs : Tuple ):
candidate_labels = model_outputs.pop("""candidate_labels""" )
logits = model_outputs["""logits"""][0]
if self.framework == "pt":
probs = logits.softmax(dim=-1 ).squeeze(-1 )
scores = probs.tolist()
if not isinstance(scores , list ):
scores = [scores]
elif self.framework == "tf":
probs = stable_softmax(logits , axis=-1 )
scores = probs.numpy().tolist()
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
result = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
]
return result
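# A minimal usage sketch of this pipeline (illustrative; the checkpoint and labels are
# assumptions). `pipeline(...)` routes "zero-shot-image-classification" to the class above:
#
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification" , model="openai/clip-vit-base-patch32" )
# preds = classifier("cat.png" , candidate_labels=["cat", "dog", "car"] )
# # -> [{"score": ..., "label": ...}, ...] sorted by descending score, as built in postprocess()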
| 43 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__a = logging.get_logger(__name__)
__a = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer ( BertTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer ( BertTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
"DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__a = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class CustomDPRReaderTokenizerMixin :
"""simple docstring"""
def __call__( self : str , questions : Optional[Any] , titles : Optional[str] = None , texts : Optional[str] = None , padding : Union[bool, str] = False , truncation : Union[bool, str] = False , max_length : Optional[int] = None , return_tensors : Optional[Union[str, TensorType]] = None , return_attention_mask : Optional[bool] = None , **kwargs : Union[str, Any] , ):
if titles is None and texts is None:
return super().__call__(
questions , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
elif titles is None or texts is None:
text_pair = titles if texts is None else texts
return super().__call__(
questions , text_pair , padding=padding , truncation=truncation , max_length=max_length , return_tensors=return_tensors , return_attention_mask=return_attention_mask , **kwargs , )
titles = titles if not isinstance(titles , str ) else [titles]
texts = texts if not isinstance(texts , str ) else [texts]
n_passages = len(titles )
questions = questions if not isinstance(questions , str ) else [questions] * n_passages
if len(titles ) != len(texts ):
raise ValueError(
f"There should be as many titles than texts but got {len(titles )} titles and {len(texts )} texts." )
encoded_question_and_titles = super().__call__(questions , titles , padding=False , truncation=False )["""input_ids"""]
encoded_texts = super().__call__(texts , add_special_tokens=False , padding=False , truncation=False )["""input_ids"""]
encoded_inputs = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles , encoded_texts )
]
}
if return_attention_mask is not False:
attention_mask = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
encoded_inputs["""attention_mask"""] = attention_mask
return self.pad(encoded_inputs , padding=padding , max_length=max_length , return_tensors=return_tensors )
def decode_best_spans( self : Optional[int] , reader_input : BatchEncoding , reader_output : DPRReaderOutput , num_spans : int = 16 , max_answer_length : int = 64 , num_spans_per_passage : int = 4 , ):
input_ids = reader_input["""input_ids"""]
start_logits , end_logits , relevance_logits = reader_output[:3]
n_passages = len(relevance_logits )
sorted_docs = sorted(range(n_passages ) , reverse=True , key=relevance_logits.__getitem__ )
nbest_spans_predictions: List[DPRReaderOutput] = []
for doc_id in sorted_docs:
sequence_ids = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
passage_offset = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
sequence_len = sequence_ids.index(self.pad_token_id )
else:
sequence_len = len(sequence_ids )
best_spans = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=max_answer_length , top_spans=num_spans_per_passage , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=doc_id , start_index=start_index , end_index=end_index , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(nbest_spans_predictions ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _get_best_spans( self : str , start_logits : List[int] , end_logits : List[int] , max_answer_length : int , top_spans : int , ):
scores = []
for start_index, start_score in enumerate(start_logits ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
scores = sorted(scores , key=lambda x : x[1] , reverse=True )
chosen_span_intervals = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]" )
length = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"Span is too long: {length} > {max_answer_length}" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(chosen_span_intervals ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING )
class DPRReaderTokenizer ( CustomDPRReaderTokenizerMixin , BertTokenizer ):
"""simple docstring"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
model_input_names = ["input_ids", "attention_mask"]
| 43 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar ( num_diffusion_timesteps ,max_beta=0.999 ,alpha_transform_type="cosine" ,) -> List[str]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(t ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t ):
return math.exp(t * -12.0 )
else:
raise ValueError(F'Unsupported alpha_transform_type: {alpha_transform_type}' )
betas = []
for i in range(num_diffusion_timesteps ):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) ,max_beta ) )
return torch.tensor(betas ,dtype=torch.float32 )
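# A quick sanity check of the schedule above (illustrative): with the "cosine" transform
# every beta stays in (0, max_beta], rising toward the end of the schedule.
#
# betas = betas_for_alpha_bar(10 )
# assert betas.min() > 0 and betas.max() <= 0.999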
class HeunDiscreteScheduler ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
_compatibles = [e.name for e in KarrasDiffusionSchedulers]
order = 2
@register_to_config
def __init__( self , num_train_timesteps = 1000 , beta_start = 0.00_085 , beta_end = 0.012 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , use_karras_sigmas = False , clip_sample = False , clip_sample_range = 1.0 , timestep_spacing = "linspace" , steps_offset = 0 , ):
"""simple docstring"""
if trained_betas is not None:
self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='cosine' )
elif beta_schedule == "exp":
self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='exp' )
else:
raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
self.use_karras_sigmas = use_karras_sigmas
def index_for_timestep( self , timestep , schedule_timesteps=None ):
"""simple docstring"""
if schedule_timesteps is None:
schedule_timesteps = self.timesteps
indices = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
pos = 1 if len(indices ) > 1 else 0
else:
timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
pos = self._index_counter[timestep_int]
return indices[pos].item()
@property
def init_noise_sigma( self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def scale_model_input( self , sample , timestep , ):
"""simple docstring"""
step_index = self.index_for_timestep(timestep )
sigma = self.sigmas[step_index]
sample = sample / ((sigma**2 + 1) ** 0.5)
return sample
def set_timesteps( self , num_inference_steps , device = None , num_train_timesteps = None , ):
"""simple docstring"""
self.num_inference_steps = num_inference_steps
num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
elif self.config.timestep_spacing == "leading":
step_ratio = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
step_ratio = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
timesteps -= 1
else:
raise ValueError(
f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
log_sigmas = np.log(sigmas )
sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
if self.config.use_karras_sigmas:
sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )
sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
sigmas = torch.from_numpy(sigmas ).to(device=device )
self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
timesteps = torch.from_numpy(timesteps )
timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(device ).startswith('mps' ):
# mps does not support float64
self.timesteps = timesteps.to(device , dtype=torch.float32 )
else:
self.timesteps = timesteps.to(device=device )
# empty dt and derivative
self.prev_derivative = None
self.dt = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
self._index_counter = defaultdict(int )
def _sigma_to_t( self , sigma , log_sigmas ):
"""simple docstring"""
log_sigma = np.log(sigma )
# get distribution
dists = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
high_idx = low_idx + 1
low = log_sigmas[low_idx]
high = log_sigmas[high_idx]
# interpolate sigmas
w = (low - log_sigma) / (low - high)
w = np.clip(w , 0 , 1 )
# transform interpolation to time range
t = (1 - w) * low_idx + w * high_idx
t = t.reshape(sigma.shape )
return t
def _convert_to_karras( self , in_sigmas , num_inference_steps ):
"""simple docstring"""
sigma_min : float = in_sigmas[-1].item()
sigma_max : float = in_sigmas[0].item()
rho = 7.0 # 7.0 is the value used in the paper
ramp = np.linspace(0 , 1 , num_inference_steps )
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
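# In equation form (Karras et al., 2022): with rho = 7 and ramp_i = i / (num_inference_steps - 1),
#   sigma_i = (sigma_max**(1/rho) + ramp_i * (sigma_min**(1/rho) - sigma_max**(1/rho))) ** rho
# i.e. the interpolation is linear in sigma**(1/rho) space, which spaces the steps more
# densely at low noise levels than a plain linear sigma schedule.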
@property
def state_in_first_order( self ):
"""simple docstring"""
return self.dt is None
def step( self , model_output , timestep , sample , return_dict = True , ):
"""simple docstring"""
step_index = self.index_for_timestep(timestep )
# advance index counter by 1
timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
sigma = self.sigmas[step_index]
sigma_next = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
sigma = self.sigmas[step_index - 1]
sigma_next = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
gamma = 0
sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
sigma_input = sigma_hat if self.state_in_first_order else sigma_next
pred_original_sample = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
sigma_input = sigma_hat if self.state_in_first_order else sigma_next
pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
else:
raise ValueError(
f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`' )
if self.config.clip_sample:
pred_original_sample = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
derivative = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
dt = sigma_next - sigma_hat
# store for 2nd order step
self.prev_derivative = derivative
self.dt = dt
self.sample = sample
else:
# 2. 2nd order / Heun's method
derivative = (sample - pred_original_sample) / sigma_next
derivative = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
dt = self.dt
sample = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
self.prev_derivative = None
self.dt = None
self.sample = None
prev_sample = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample )
def add_noise( self , original_samples , noise , timesteps , ):
"""simple docstring"""
# Make sure sigmas and timesteps have the same device and dtype as original_samples
sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
# mps does not support float64
schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
schedule_timesteps = self.timesteps.to(original_samples.device )
timesteps = timesteps.to(original_samples.device )
step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
sigma = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
sigma = sigma.unsqueeze(-1 )
noisy_samples = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
"""simple docstring"""
return self.config.num_train_timesteps
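# A minimal denoising-loop sketch for this scheduler (illustrative; `model` stands in for
# any epsilon-predicting UNet and is not defined in this file). Heun's method uses two
# model calls per output step, which is why set_timesteps repeat_interleaves the timesteps:
#
# scheduler = HeunDiscreteScheduler(num_train_timesteps=1000 )
# scheduler.set_timesteps(num_inference_steps=25 )
# sample = torch.randn(1 , 3 , 64 , 64 ) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     model_input = scheduler.scale_model_input(sample , t )
#     noise_pred = model(model_input , t )  # hypothetical model call
#     sample = scheduler.step(noise_pred , t , sample ).prev_sample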
| 71 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def a ( __a ) -> str:
'''simple docstring'''
__a = re.sub('''<n>''' , '''''' , __a ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__a ) ) | 97 | 0 |
"""simple docstring"""
def naive_cut_rod_recursive ( n , prices ) -> Union[str, Any]:
_enforce_args(n , prices )
if n == 0:
return 0
max_revenue = float("-inf" )
for i in range(1 , n + 1 ):
max_revenue = max(
max_revenue , prices[i - 1] + naive_cut_rod_recursive(n - i , prices ) )
return max_revenue
def top_down_cut_rod ( n , prices ) -> Optional[Any]:
_enforce_args(n , prices )
max_rev = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(n , prices , max_rev )
def _top_down_cut_rod_recursive ( n , prices , max_rev ) -> str:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
max_revenue = float("-inf" )
for i in range(1 , n + 1 ):
max_revenue = max(
max_revenue , prices[i - 1] + _top_down_cut_rod_recursive(n - i , prices , max_rev ) , )
max_rev[n] = max_revenue
return max_rev[n]
def bottom_up_cut_rod ( n , prices ) -> Union[str, Any]:
_enforce_args(n , prices )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
max_rev = [float("-inf" ) for _ in range(n + 1 )]
max_rev[0] = 0
for i in range(1 , n + 1 ):
max_revenue_i = max_rev[i]
for j in range(1 , i + 1 ):
max_revenue_i = max(max_revenue_i , prices[j - 1] + max_rev[i - j] )
max_rev[i] = max_revenue_i
return max_rev[n]
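# Worked example: prices = [1, 5, 8, 9] for rod lengths 1..4. The optimum for n = 4 is two
# pieces of length 2 (5 + 5 = 10), and all three implementations agree:
#
# assert bottom_up_cut_rod(4 , [1, 5, 8, 9] ) == top_down_cut_rod(4 , [1, 5, 8, 9] ) == 10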
def _enforce_args ( n , prices ) -> List[str]:
if n < 0:
msg = F"n must be greater than or equal to 0. Got n = {n}"
raise ValueError(msg )
if n > len(prices ):
msg = (
"Each integral piece of rod must have a corresponding price. "
F"Got n = {n} but length of prices = {len(prices )}"
)
raise ValueError(msg )
def main ( ) -> List[str]:
prices = [6, 1_0, 1_2, 1_5, 2_0, 2_3]
n = len(prices )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
expected_max_revenue = 3_6
max_rev_top_down = top_down_cut_rod(n , prices )
max_rev_bottom_up = bottom_up_cut_rod(n , prices )
max_rev_naive = naive_cut_rod_recursive(n , prices )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
| 108 |
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
__A = get_logger(__name__)
class snake_case :
SCREAMING_SNAKE_CASE_ : List[Any] = """dummy_data"""
SCREAMING_SNAKE_CASE_ : List[Any] = """datasets"""
SCREAMING_SNAKE_CASE_ : Any = False
def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Union[Version, str] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[List[Callable]] = None , )-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[Any] = 0
__lowerCAmelCase: Tuple = dataset_name
__lowerCAmelCase: Optional[Any] = cache_dir
__lowerCAmelCase: Optional[int] = use_local_dummy_data
__lowerCAmelCase: Optional[Any] = config
# download_callbacks take a single url as input
__lowerCAmelCase: List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
__lowerCAmelCase: Union[str, Any] = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
__lowerCAmelCase: List[str] = str(UpperCamelCase__)
# to be downloaded
__lowerCAmelCase: Dict = None
__lowerCAmelCase: Dict = None
@property
def lowercase_ ( self : List[str])-> str:
'''simple docstring'''
if self._dummy_file is None:
__lowerCAmelCase: Tuple = self.download_dummy_data()
return self._dummy_file
@property
def lowercase_ ( self : Dict)-> Optional[Any]:
'''simple docstring'''
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name)
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name)
@property
def lowercase_ ( self : List[str])-> Any:
'''simple docstring'''
return os.path.join(self.dummy_data_folder , "dummy_data.zip")
def lowercase_ ( self : Optional[Any])-> List[str]:
'''simple docstring'''
__lowerCAmelCase: Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
__lowerCAmelCase: str = cached_path(
UpperCamelCase__ , cache_dir=self.cache_dir , extract_compressed_file=UpperCamelCase__ , force_extract=UpperCamelCase__)
return os.path.join(UpperCamelCase__ , self.dummy_file_name)
@property
def lowercase_ ( self : Dict)-> List[Any]:
'''simple docstring'''
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file)
@property
def lowercase_ ( self : Optional[Any])-> Tuple:
'''simple docstring'''
if self._bucket_url is None:
__lowerCAmelCase: int = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/"))
return self._bucket_url
@property
def lowercase_ ( self : str)-> Optional[int]:
'''simple docstring'''
if os.path.isdir(self.dummy_file):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/").split("/")[:-1])
def lowercase_ ( self : List[Any] , UpperCamelCase__ : int , *UpperCamelCase__ : List[str])-> Optional[int]:
'''simple docstring'''
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
__lowerCAmelCase: List[Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
__lowerCAmelCase: str = self.dummy_file_name
# special case when data_url is a dict
if isinstance(UpperCamelCase__ , UpperCamelCase__):
return self.create_dummy_data_dict(UpperCamelCase__ , UpperCamelCase__)
elif isinstance(UpperCamelCase__ , (list, tuple)):
return self.create_dummy_data_list(UpperCamelCase__ , UpperCamelCase__)
else:
return self.create_dummy_data_single(UpperCamelCase__ , UpperCamelCase__)
def lowercase_ ( self : Dict , UpperCamelCase__ : Dict , *UpperCamelCase__ : int)-> Dict:
'''simple docstring'''
return self.download_and_extract(UpperCamelCase__)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any])-> str:
'''simple docstring'''
return self.download_and_extract(UpperCamelCase__)
def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : List[Any] , *UpperCamelCase__ : int , **UpperCamelCase__ : str)-> List[str]:
'''simple docstring'''
return path
def lowercase_ ( self : Optional[Any])-> Any:
'''simple docstring'''
return {}
def lowercase_ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : int)-> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase: Optional[int] = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(UpperCamelCase__ , UpperCamelCase__):
for single_url in single_urls:
download_callback(UpperCamelCase__)
else:
__lowerCAmelCase: Union[str, Any] = single_urls
download_callback(UpperCamelCase__)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: Dict = [os.path.join(UpperCamelCase__ , urllib.parse.quote_plus(Path(UpperCamelCase__).name)) for x in single_urls]
else:
__lowerCAmelCase: Any = single_urls
__lowerCAmelCase: Optional[int] = os.path.join(UpperCamelCase__ , urllib.parse.quote_plus(Path(UpperCamelCase__).name))
__lowerCAmelCase: Dict = value
# make sure that values are unique
if all(isinstance(UpperCamelCase__ , UpperCamelCase__) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
dummy_data_dict.values()):
# append key to value to make its name unique
__lowerCAmelCase: Any = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def lowercase_ ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : List[Any])-> int:
'''simple docstring'''
__lowerCAmelCase: Tuple = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
__lowerCAmelCase: Any = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , UpperCamelCase__)) for url in data_url)
__lowerCAmelCase: str = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url)
if data_url and (is_tf_records or is_pubmed_records):
__lowerCAmelCase: Optional[int] = [data_url[0]] * len(UpperCamelCase__)
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase__)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCAmelCase: Optional[Any] = os.path.join(UpperCamelCase__ , urllib.parse.quote_plus(single_url.split("/")[-1]))
dummy_data_list.append(UpperCamelCase__)
return dummy_data_list
def lowercase_ ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any])-> Optional[int]:
'''simple docstring'''
for download_callback in self.download_callbacks:
download_callback(UpperCamelCase__)
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
__lowerCAmelCase: List[Any] = os.path.join(UpperCamelCase__ , urllib.parse.quote_plus(data_url.split("/")[-1]))
if os.path.exists(UpperCamelCase__) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
            # For many datasets with a single url call to dl_manager.download_and_extract,
            # the dummy_data.zip file used to be the zipped downloaded file itself,
            # while now we expect the dummy_data.zip file to be a directory containing
            # the downloaded file.
return path_to_dummy_data
def lowercase_ ( self : List[str])-> Dict:
'''simple docstring'''
pass
def lowercase_ ( self : Union[str, Any])-> Tuple:
'''simple docstring'''
pass
def lowercase_ ( self : Dict , UpperCamelCase__ : str)-> int:
'''simple docstring'''
def _iter_archive_members(UpperCamelCase__ : str):
# this preserves the order of the members inside the ZIP archive
__lowerCAmelCase: Optional[Any] = Path(self.dummy_file).parent
__lowerCAmelCase: Optional[int] = path.relative_to(UpperCamelCase__)
with ZipFile(self.local_path_to_dummy_data) as zip_file:
__lowerCAmelCase: Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix()):
yield dummy_parent_path.joinpath(UpperCamelCase__)
__lowerCAmelCase: str = Path(UpperCamelCase__)
__lowerCAmelCase: Optional[Any] = _iter_archive_members(UpperCamelCase__) if self.use_local_dummy_data else path.rglob("*")
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__")):
yield file_path.relative_to(UpperCamelCase__).as_posix(), file_path.open("rb")
def lowercase_ ( self : str , UpperCamelCase__ : str)-> str:
'''simple docstring'''
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
__lowerCAmelCase: Dict = [paths]
for path in paths:
if os.path.isfile(UpperCamelCase__):
if os.path.basename(UpperCamelCase__).startswith((".", "__")):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(UpperCamelCase__):
if os.path.basename(UpperCamelCase__).startswith((".", "__")):
continue
dirnames.sort()
for filename in sorted(UpperCamelCase__):
if filename.startswith((".", "__")):
continue
yield os.path.join(UpperCamelCase__ , UpperCamelCase__)
| 108 | 1 |
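The key convention in the dummy-data manager above is that each remote URL maps to a file inside the dummy folder named after the URL's last path component, percent-encoded so query strings stay filesystem-safe. A small illustration (the URL and folder are hypothetical):

import os
import urllib.parse
from pathlib import Path


def dummy_path_for(url: str, dummy_root: str) -> str:
    # the last path component of the URL, quoted, becomes the local file name
    return os.path.join(dummy_root, urllib.parse.quote_plus(Path(url).name))


print(dummy_path_for("https://example.com/data/train.csv?rev=2", "dummy/1.0.0"))
# -> dummy/1.0.0/train.csv%3Frev%3D2 (on POSIX)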
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A ( a_ ,a_ ,a_ ) -> Tuple:
# Construct model
if openai_config_file == "":
__UpperCamelCase : int =OpenAIGPTConfig()
else:
__UpperCamelCase : Any =OpenAIGPTConfig.from_json_file(a_ )
__UpperCamelCase : Any =OpenAIGPTModel(a_ )
# Load weights from numpy
load_tf_weights_in_openai_gpt(a_ ,a_ ,a_ )
# Save pytorch-model
__UpperCamelCase : Optional[Any] =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__UpperCamelCase : Optional[Any] =pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(model.state_dict() ,a_ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(a_ ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
A_ :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
A_ :Optional[int] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
| 71 |
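A de-obfuscated sketch of the conversion flow in the script above; the checkpoint and output paths are placeholders:

import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt

config = OpenAIGPTConfig()  # or OpenAIGPTConfig.from_json_file("openai_config.json")
model = OpenAIGPTModel(config)
# copy the TensorFlow/numpy weights into the freshly initialized PyTorch model
load_tf_weights_in_openai_gpt(model, config, "/path/to/openai/checkpoint")
torch.save(model.state_dict(), "/path/to/output/pytorch_model.bin")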
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def _a ( self , A_ ) -> float:
return 0.0
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
__UpperCamelCase =max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : FilterType , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =5_12
__UpperCamelCase =[1] + [0] * (size - 1)
__UpperCamelCase =[filter_type.process(SCREAMING_SNAKE_CASE__ ) for item in inputs]
__UpperCamelCase =[0] * (samplerate - size) # zero-padding
outputs += filler
__UpperCamelCase =np.abs(np.fft.fft(SCREAMING_SNAKE_CASE__ ) )
    __UpperCamelCase =20 * np.log10(SCREAMING_SNAKE_CASE__ )
    # Frequencies on log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
# Display within reasonable bounds
__UpperCamelCase =get_bounds(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
plt.ylabel('Gain (dB)' )
plt.plot(SCREAMING_SNAKE_CASE__ )
plt.show()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : FilterType , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =5_12
__UpperCamelCase =[1] + [0] * (size - 1)
__UpperCamelCase =[filter_type.process(SCREAMING_SNAKE_CASE__ ) for item in inputs]
__UpperCamelCase =[0] * (samplerate - size) # zero-padding
outputs += filler
__UpperCamelCase =np.angle(np.fft.fft(SCREAMING_SNAKE_CASE__ ) )
    # Frequencies on log scale from 24 Hz to the Nyquist frequency
plt.xlim(24 , samplerate / 2 - 1 )
plt.xlabel('Frequency (Hz)' )
plt.xscale('log' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('Phase shift (Radians)' )
plt.plot(np.unwrap(SCREAMING_SNAKE_CASE__ , -2 * pi ) )
plt.show()
| 62 | 0 |
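A quick sanity check of the magnitude-response math used above: feeding an impulse through a pass-through filter must yield a flat 0 dB spectrum. The IdentityFilter class here is illustrative, standing in for the FilterType protocol:

import numpy as np


class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


size = 512
impulse = [1.0] + [0.0] * (size - 1)
outputs = [IdentityFilter().process(x) for x in impulse]
gain_db = 20 * np.log10(np.abs(np.fft.fft(outputs)) + 1e-12)
assert np.allclose(gain_db, 0.0, atol=1e-6)  # delta in -> all-ones spectrum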
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
UpperCAmelCase_ : Any = Lock()
def _A (__a , __a , __a , __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__a )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE_ : int = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
SCREAMING_SNAKE_CASE_ : int = min(__a , __a )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__a )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
SCREAMING_SNAKE_CASE_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
SCREAMING_SNAKE_CASE_ : Optional[Any] = max(__a , __a )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__a )
def _A (__a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = []
SCREAMING_SNAKE_CASE_ : str = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
SCREAMING_SNAKE_CASE_ : Tuple = Pipe()
SCREAMING_SNAKE_CASE_ : str = Pipe()
process_array_.append(
Process(
target=__a , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
SCREAMING_SNAKE_CASE_ : Any = temp_rs
SCREAMING_SNAKE_CASE_ : Optional[Any] = temp_rr
for i in range(1 , len(__a ) - 1 ):
SCREAMING_SNAKE_CASE_ : Optional[int] = Pipe()
SCREAMING_SNAKE_CASE_ : Optional[int] = Pipe()
process_array_.append(
Process(
target=__a , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
SCREAMING_SNAKE_CASE_ : Tuple = temp_rs
SCREAMING_SNAKE_CASE_ : str = temp_rr
process_array_.append(
Process(
target=__a , args=(
len(__a ) - 1,
arr[len(__a ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__a ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__a ) ):
SCREAMING_SNAKE_CASE_ : Tuple = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def _A () -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*__a )
SCREAMING_SNAKE_CASE_ : Any = odd_even_transposition(__a )
print('''Sorted List\n''' )
print(*__a )
if __name__ == "__main__":
main()
| 318 |
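The multiprocessing version above is easier to follow against its sequential equivalent: n alternating passes of compare-and-swap over even-indexed and odd-indexed neighbor pairs:

def odd_even_transposition_single_thread(arr: list) -> list:
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_single_thread(list(range(10, 0, -1))) == list(range(1, 11))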
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies
def __iter__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for task in range(self.n_tasks):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = start_length
SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
SCREAMING_SNAKE_CASE_ : Tuple = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(lowercase_)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a )
# last string should be ""
return "".join(string_list[:-2] )
def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ : str = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ : List[str] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a )
    # note: args.batch_size is actually num_return_sequences, not the dataloader batch size
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ : int = []
for task in tqdm(range(__a ) ):
SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})'
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__a , __a )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 318 | 1 |
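The post-processing helper near the top of the script truncates a completion at the last stop string; here is its behavior on a hypothetical generation:

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


def remove_last_block(string: str) -> str:
    # split on the stop strings (kept via the capture group), then drop the
    # final stop string and everything after it
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])


completion = "    return x + 1\n\nprint(add_one(2))"
assert remove_last_block(completion) == "    return x + 1\n"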
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ :
'''simple docstring'''
def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=3 , A=4 , A=None , ) -> int:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def snake_case_( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_( self ) -> Any:
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def snake_case_( self , A , A , A , A , A , A , A ) -> str:
_SCREAMING_SNAKE_CASE = NystromformerModel(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A )
_SCREAMING_SNAKE_CASE = model(A , token_type_ids=A )
_SCREAMING_SNAKE_CASE = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_( self , A , A , A , A , A , A , A ) -> str:
_SCREAMING_SNAKE_CASE = NystromformerForMaskedLM(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_( self , A , A , A , A , A , A , A ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = NystromformerForQuestionAnswering(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(
A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_( self , A , A , A , A , A , A , A ) -> Any:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = NystromformerForSequenceClassification(A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_( self , A , A , A , A , A , A , A ) -> str:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = NystromformerForTokenClassification(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = model(A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_( self , A , A , A , A , A , A , A ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = NystromformerForMultipleChoice(config=A )
model.to(A )
model.eval()
_SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = model(
A , attention_mask=A , token_type_ids=A , labels=A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = NystromformerModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A , hidden_size=37 )
def snake_case_( self ) -> int:
self.config_tester.run_common_tests()
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_SCREAMING_SNAKE_CASE = type
self.model_tester.create_and_check_model(*A )
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@slow
def snake_case_( self ) -> int:
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(A )[0]
_SCREAMING_SNAKE_CASE = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , A )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , A , atol=1e-4 ) )
@slow
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = """the [MASK] of Belgium is Brussels"""
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
_SCREAMING_SNAKE_CASE = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
_SCREAMING_SNAKE_CASE = tokenizer(A , return_tensors="""pt""" )
with torch.no_grad():
_SCREAMING_SNAKE_CASE = model(encoding.input_ids ).logits
_SCREAMING_SNAKE_CASE = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(A ) , """capital""" )
| 58 |
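A runnable sketch of the fill-mask check the slow integration test above performs (network access to the "uw-madison/nystromformer-512" checkpoint is assumed):

import torch
from transformers import AutoTokenizer, NystromformerForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
encoding = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
with torch.no_grad():
    logits = model(**encoding).logits
mask_pos = (encoding.input_ids == tokenizer.mask_token_id).nonzero()[0, 1]
print(tokenizer.decode(logits[0, mask_pos].argmax(-1)))  # the test expects "capital"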
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    return str(num) == str(num)[::-1]
def sum_reverse(num: int) -> int:
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 1_00_00) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        start = num
        while iterations < 50:
            num = sum_reverse(num)
            iterations += 1
            if is_palindrome(num):
                break
        else:
            # no palindrome within 50 reverse-and-add steps -> Lychrel candidate
            lychrel_nums.append(start)
    return len(lychrel_nums)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 162 | 0 |
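A worked instance of the reverse-and-add loop: 349 reaches a palindrome in three iterations, so it is not a Lychrel candidate:

n = 349
for _ in range(3):
    n += int(str(n)[::-1])  # 349 -> 1292 -> 4213 -> 7337
assert n == 7337 and str(n) == str(n)[::-1]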
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = TextToVideoSDPipeline
__lowerCamelCase : str = TEXT_TO_IMAGE_PARAMS
__lowerCamelCase : List[str] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
__lowerCamelCase : Dict = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def _lowerCAmelCase ( self ):
torch.manual_seed(0 )
A : str = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D"""), up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D"""), cross_attention_dim=32, attention_head_dim=4, )
A : Optional[Any] = DDIMScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=lowerCamelCase__, set_alpha_to_one=lowerCamelCase__, )
torch.manual_seed(0 )
A : str = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, sample_size=128, )
torch.manual_seed(0 )
A : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="""gelu""", projection_dim=512, )
A : int = CLIPTextModel(lowerCamelCase__ )
A : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A : List[str] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def _lowerCAmelCase ( self, lowerCamelCase__, lowerCamelCase__=0 ):
if str(lowerCamelCase__ ).startswith("""mps""" ):
A : Optional[int] = torch.manual_seed(lowerCamelCase__ )
else:
A : Optional[int] = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
A : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def _lowerCAmelCase ( self ):
A : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
A : Tuple = self.get_dummy_components()
A : Dict = TextToVideoSDPipeline(**lowerCamelCase__ )
A : List[Any] = sd_pipe.to(lowerCamelCase__ )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A : int = self.get_dummy_inputs(lowerCamelCase__ )
A : Any = """np"""
A : Optional[int] = sd_pipe(**lowerCamelCase__ ).frames
A : str = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A : Optional[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCamelCase__, expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
def _lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCamelCase__, expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _lowerCAmelCase ( self ):
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def _lowerCAmelCase ( self ):
pass
def _lowerCAmelCase ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
A : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A : int = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A : Optional[int] = pipe.to("""cuda""" )
A : int = """Spiderman is surfing"""
A : int = torch.Generator(device="""cpu""" ).manual_seed(0 )
A : List[Any] = pipe(lowerCamelCase__, generator=lowerCamelCase__, num_inference_steps=25, output_type="""pt""" ).frames
A : Optional[int] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def _lowerCAmelCase ( self ):
A : Any = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A : str = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A : Dict = pipe.to("""cuda""" )
A : int = """Spiderman is surfing"""
A : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
A : Dict = pipe(lowerCamelCase__, generator=lowerCamelCase__, num_inference_steps=2, output_type="""pt""" ).frames
A : List[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 115 |
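For orientation, the slow test above boils down to this usage sketch (a CUDA device and the "damo-vilab/text-to-video-ms-1.7b" weights are assumed to be available):

import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b").to("cuda")
generator = torch.Generator(device="cpu").manual_seed(0)
frames = pipe(
    "Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt"
).frames  # generated video frames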
import requests
APPID = """"""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = """https://api.openweathermap.org/data/2.5/"""
def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + """weather""" , params=locals() ).json()
def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + """forecast""" , params=locals() ).json()
def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """simple docstring"""
    return requests.get(URL_BASE + """onecall""" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("""Enter a location:""").strip()
if location:
pprint(current_weather(location))
else:
break
| 115 | 1 |
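Usage sketch for the helpers above; the parameter names q, lat, and lon exist because params=locals() forwards them verbatim as the query parameters the public OpenWeatherMap API expects, and a real appid must be set:

report = current_weather("Chicago")
print(report.get("weather", [{}])[0].get("description"))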
"""simple docstring"""
def lowerCamelCase__ ( _lowerCamelCase : Optional[int] ) -> list:
lowerCamelCase_ = len(__UpperCAmelCase )
for i in range(1 , __UpperCAmelCase ):
lowerCamelCase_ = collection[i]
lowerCamelCase_ = 0
lowerCamelCase_ = i - 1
while low <= high:
lowerCamelCase_ = (low + high) // 2
if val < collection[mid]:
lowerCamelCase_ = mid - 1
else:
lowerCamelCase_ = mid + 1
for j in range(__UpperCAmelCase , __UpperCAmelCase , -1 ):
lowerCamelCase_ = collection[j - 1]
lowerCamelCase_ = val
return collection
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
_SCREAMING_SNAKE_CASE : List[Any] = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted))
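With readable names, the loop above is the classic binary insertion sort: binary-search the insertion point within the already-sorted prefix, shift, insert:

def binary_insertion_sort(collection: list) -> list:
    for i in range(1, len(collection)):
        val = collection[i]
        low, high = 0, i - 1
        while low <= high:  # find leftmost index where val can go (keeps the sort stable)
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        collection[low + 1 : i + 1] = collection[low:i]  # shift the tail right by one
        collection[low] = val
    return collection


assert binary_insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]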
 | 183 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
__A = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 177 | 0 |
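The module above is the standard transformers lazy-import pattern: names listed in _import_structure are resolved only on first attribute access. A stripped-down sketch of the idea, not the actual _LazyModule implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._name_to_submodule = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr: str):
        if attr not in self._name_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._name_to_submodule[attr], self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value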
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
__UpperCamelCase : Any = {"target_lang": "fi", "source_lang": "en"}
__UpperCamelCase : Optional[int] = ">>zh<<"
__UpperCamelCase : List[Any] = "Helsinki-NLP/"
if is_torch_available():
__UpperCamelCase : str = "pt"
elif is_tf_available():
__UpperCamelCase : Union[str, Any] = "tf"
else:
__UpperCamelCase : int = "jax"
@require_sentencepiece
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = MarianTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = True
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
super().setUp()
a = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
a = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
a = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
a = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self :Any , **__magic_name__ :str ):
'''simple docstring'''
return MarianTokenizer.from_pretrained(self.tmpdirname , **__magic_name__ )
def lowerCamelCase__ ( self :Dict , __magic_name__ :List[Any] ):
'''simple docstring'''
return (
"This is a test",
"This is a test",
)
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = """</s>"""
a = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(__magic_name__ ) , 9 )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = MarianTokenizer.from_pretrained(F'{ORG_NAME}opus-mt-en-de' )
a = en_de_tokenizer(["""I am a small frog"""] , return_tensors=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
a = [38, 121, 14, 697, 3_8848, 0]
self.assertListEqual(__magic_name__ , batch.input_ids[0] )
a = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(__magic_name__ )
a = [x.name for x in Path(__magic_name__ ).glob("""*""" )]
self.assertIn("""source.spm""" , __magic_name__ )
MarianTokenizer.from_pretrained(__magic_name__ )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = self.get_tokenizer()
a = tok(
["""I am a small frog""" * 1000, """I am a small frog"""] , padding=__magic_name__ , truncation=__magic_name__ , return_tensors=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def lowerCamelCase__ ( self :Dict ):
'''simple docstring'''
a = self.get_tokenizer()
a = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=__magic_name__ , return_tensors=__magic_name__ )
self.assertIsInstance(__magic_name__ , __magic_name__ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = {"""input_ids""": [[4_3495, 462, 20, 4_2164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 3_8999, 6, 8, 464, 132, 1703, 492, 13, 4669, 3_7867, 13, 7525, 27, 1593, 988, 13, 3_3972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 1_2338, 2, 1_3958, 387, 2, 3629, 6953, 188, 2900, 2, 1_3958, 8011, 1_1501, 23, 8460, 4073, 3_4009, 20, 435, 1_1439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 3_7867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 2_6453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 1_0767, 6, 316, 304, 4239, 3, 0], [148, 1_5722, 19, 1839, 12, 1350, 13, 2_2327, 5082, 5418, 4_7567, 3_5938, 59, 318, 1_9552, 108, 2183, 54, 1_4976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 1_9088, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100], [36, 6395, 1_2570, 3_9147, 1_1597, 6, 266, 4, 4_5405, 7296, 3, 0, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100, 5_8100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
a = """Tämä on testi"""
a = """This is a test"""
a = [76, 7, 2047, 2]
a = [69, 12, 11, 940, 2]
a = tokenizer(__magic_name__ ).input_ids
self.assertListEqual(__magic_name__ , __magic_name__ )
a = tokenizer(text_target=__magic_name__ ).input_ids
self.assertListEqual(__magic_name__ , __magic_name__ )
a = tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
| 347 |
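The source/target two-vocab behavior the tests above exercise, as a plain usage sketch (the checkpoint download is assumed to succeed):

from transformers import MarianTokenizer

tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")
batch = tok(["I am a small frog"], return_tensors="pt")  # encoded with the source vocab
labels = tok(text_target=["Ich bin ein kleiner Frosch"]).input_ids  # target vocab
print(batch.input_ids.shape, len(labels[0]))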
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Dict = logging.get_logger(__name__)
class __lowerCAmelCase ( __magic_name__ ):
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self :List[str] , __magic_name__ :int="</s>" , __magic_name__ :List[Any]="<unk>" , __magic_name__ :Optional[Any]="<pad>" , __magic_name__ :Optional[int]=125 , __magic_name__ :List[str]=None , **__magic_name__ :List[str] , ):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
a = [F'<extra_id_{i}>' for i in range(__magic_name__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
a = len(set(filter(lambda __magic_name__ : bool("""extra_id""" in str(__magic_name__ ) ) , __magic_name__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else pad_token
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else eos_token
a = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else unk_token
super().__init__(
eos_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , extra_ids=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
a = extra_ids
a = 2**8 # utf is 8 bits
# define special tokens dict
a = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
a = len(self.special_tokens_encoder )
a = len(__magic_name__ )
for i, token in enumerate(__magic_name__ ):
a = self.vocab_size + i - n
a = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def lowerCamelCase__ ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None , __magic_name__ :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__magic_name__ )) + [1]
return ([0] * len(__magic_name__ )) + [1] + ([0] * len(__magic_name__ )) + [1]
def lowerCamelCase__ ( self :str , __magic_name__ :List[int] ):
'''simple docstring'''
if len(__magic_name__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
a = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ):
'''simple docstring'''
a = self._add_eos_if_not_present(__magic_name__ )
if token_ids_a is None:
return token_ids_a
else:
a = self._add_eos_if_not_present(__magic_name__ )
return token_ids_a + token_ids_a
def lowerCamelCase__ ( self :List[str] , __magic_name__ :str ):
'''simple docstring'''
a = [chr(__magic_name__ ) for i in text.encode("""utf-8""" )]
return tokens
def lowerCamelCase__ ( self :Tuple , __magic_name__ :str ):
'''simple docstring'''
if token in self.special_tokens_encoder:
a = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
a = self.added_tokens_encoder[token]
elif len(__magic_name__ ) != 1:
a = self.unk_token_id
else:
a = ord(__magic_name__ ) + self._num_special_tokens
return token_id
def lowerCamelCase__ ( self :List[str] , __magic_name__ :Dict ):
'''simple docstring'''
if index in self.special_tokens_decoder:
a = self.special_tokens_decoder[index]
else:
a = chr(index - self._num_special_tokens )
return token
def lowerCamelCase__ ( self :Tuple , __magic_name__ :Optional[int] ):
'''simple docstring'''
a = b""""""
for token in tokens:
if token in self.special_tokens_decoder:
a = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
                a = self.added_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
a = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
a = token.encode("""utf-8""" )
else:
a = bytes([ord(__magic_name__ )] )
bstring += tok_string
a = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :str , __magic_name__ :Optional[str] = None ):
'''simple docstring'''
return ()
| 347 | 1 |
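# A minimal, self-contained sketch (illustrative only, not the original API) of the
# byte-level scheme the tokenizer above implements: every UTF-8 byte becomes one
# single-character token, token ids are offset by the number of special tokens
# (pad=0, eos=1, unk=2), and decoding simply reverses the offset.
NUM_SPECIAL_TOKENS = 3

def byte_tokenize(text):
    # one token per UTF-8 byte
    return [chr(b) for b in text.encode("utf-8")]

def byte_token_to_id(token):
    return ord(token) + NUM_SPECIAL_TOKENS

def byte_id_to_token(index):
    return chr(index - NUM_SPECIAL_TOKENS)

sample = "héllo"
ids = [byte_token_to_id(t) for t in byte_tokenize(sample)]
roundtrip = bytes(ord(byte_id_to_token(i)) for i in ids).decode("utf-8")
assert roundtrip == sample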
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands, it doesn't print a newline after the results
import re
import subprocess
import sys
A__ : List[Any] = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
A__ : Any = subprocess.check_output(F"git diff --name-only {fork_point_sha}".split()).decode('utf-8').split()
A__ : Tuple = '|'.join(sys.argv[1:])
A__ : Any = re.compile(rF"^({joined_dirs}).*?\.py$")
A__ : List[Any] = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 207 |
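# A small self-contained illustration (assumed invocation, not part of the script
# above) of how the filtering regex behaves for
# `python ./utils/get_modified_files.py utils src tests`: only paths that start
# with one of the given dirs and end in .py survive.
import re

joined = "|".join(["utils", "src", "tests"])
pattern = re.compile(rf"^({joined}).*?\.py$")
assert pattern.match("src/transformers/trainer.py")
assert not pattern.match("docs/source/index.mdx")  # wrong top-level dir
assert not pattern.match("src/README.md")          # not a .py file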
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
A__ : Tuple = logging.get_logger(__name__)
A__ : int = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def __init__( self : Optional[int], lowerCamelCase : int=None, lowerCamelCase : int=None, *lowerCamelCase : List[Any], **lowerCamelCase : Any ):
'''simple docstring'''
super().__init__(*lowerCamelCase, **lowerCamelCase )
if config is None:
assert isinstance(self.model, lowerCamelCase ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
lowercase__ = self.model.config
else:
lowercase__ = config
lowercase__ = data_args
lowercase__ = self.config.tgt_vocab_size if isinstance(self.config, lowerCamelCase ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ''' padding.''' )
if self.args.label_smoothing == 0:
lowercase__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
lowercase__ = label_smoothed_nll_loss
def lowercase__ ( self : List[Any], lowerCamelCase : int ):
'''simple docstring'''
if self.optimizer is None:
lowercase__ = ['''bias''', '''LayerNorm.weight''']
lowercase__ = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
lowercase__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
lowercase__ = Adafactor
lowercase__ = {'''scale_parameter''': False, '''relative_step''': False}
else:
lowercase__ = AdamW
lowercase__ = {
                '''betas''': (self.args.adam_beta1, self.args.adam_beta2),
'''eps''': self.args.adam_epsilon,
}
lowercase__ = self.args.learning_rate
if self.sharded_ddp:
lowercase__ = OSS(
params=lowerCamelCase, optim=lowerCamelCase, **lowerCamelCase, )
else:
lowercase__ = optimizer_cls(lowerCamelCase, **lowerCamelCase )
if self.lr_scheduler is None:
lowercase__ = self._get_lr_scheduler(lowerCamelCase )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def lowercase__ ( self : List[str], lowerCamelCase : Optional[int] ):
'''simple docstring'''
lowercase__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
lowercase__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
lowercase__ = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps )
else:
lowercase__ = schedule_func(
self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=lowerCamelCase )
return scheduler
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
if isinstance(self.train_dataset, torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : str, lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
lowercase__ = model(**lowerCamelCase, use_cache=lowerCamelCase )[0]
lowercase__ = self.loss_fn(logits.view(-1, logits.shape[-1] ), labels.view(-1 ) )
else:
# compute usual loss via models
lowercase__ , lowercase__ = model(**lowerCamelCase, labels=lowerCamelCase, use_cache=lowerCamelCase )[:2]
else:
# compute label smoothed loss
lowercase__ = model(**lowerCamelCase, use_cache=lowerCamelCase )[0]
lowercase__ = torch.nn.functional.log_softmax(lowerCamelCase, dim=-1 )
lowercase__ , lowercase__ = self.loss_fn(lowerCamelCase, lowerCamelCase, self.args.label_smoothing, ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase__ ( self : List[str], lowerCamelCase : Optional[Any], lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = inputs.pop('''labels''' )
lowercase__ , lowercase__ = self._compute_loss(lowerCamelCase, lowerCamelCase, lowerCamelCase )
return loss
def lowercase__ ( self : str, lowerCamelCase : nn.Module, lowerCamelCase : Dict[str, Union[torch.Tensor, Any]], lowerCamelCase : bool, lowerCamelCase : Optional[List[str]] = None, ):
'''simple docstring'''
lowercase__ = self._prepare_inputs(lowerCamelCase )
lowercase__ = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
lowercase__ = self.model.generate(
inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], **lowerCamelCase, )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
lowercase__ = self._pad_tensors_to_max_len(lowerCamelCase, gen_kwargs['''max_length'''] )
lowercase__ = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
lowercase__ , lowercase__ = self._compute_loss(lowerCamelCase, lowerCamelCase, lowerCamelCase )
lowercase__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
lowercase__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
lowercase__ = self._pad_tensors_to_max_len(lowerCamelCase, gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def lowercase__ ( self : List[Any], lowerCamelCase : str, lowerCamelCase : Any ):
'''simple docstring'''
        # If the PAD token is not defined, at least the EOS token has to be defined
lowercase__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
F""" padded to `max_length`={max_length}""" )
lowercase__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device )
lowercase__ = tensor
return padded_tensor
| 207 | 1 |
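# A minimal sketch (illustrative, not the trainer code itself) of the optimizer
# parameter grouping used in the method above: parameters whose names contain
# "bias" or "LayerNorm.weight" get weight_decay=0.0, everything else gets the
# configured decay. The tiny model below is a stand-in.
import torch
from torch import nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)

model = Toy()
no_decay = ["bias", "LayerNorm.weight"]
grouped = [
    {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
     "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
     "weight_decay": 0.0},
]
optimizer = torch.optim.AdamW(grouped, lr=3e-4, betas=(0.9, 0.999))
assert len(grouped[0]["params"]) == 1  # only dense.weight is decayed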
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] =ProphetNetTokenizer
lowercase : int =False
def UpperCamelCase ( self ):
super().setUp()
lowercase_ :Optional[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowercase_ :List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCamelCase ( self , UpperCamelCase_ ):
lowercase_ :List[str] = '''UNwant\u00E9d,running'''
lowercase_ :Tuple = '''unwanted, running'''
return input_text, output_text
def UpperCamelCase ( self ):
lowercase_ :Dict = self.tokenizer_class(self.vocab_file )
lowercase_ :Tuple = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(UpperCamelCase_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCamelCase ( self ):
lowercase_ :Tuple = BasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCamelCase ( self ):
lowercase_ :Dict = BasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCamelCase ( self ):
lowercase_ :Any = BasicTokenizer(do_lower_case=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCamelCase ( self ):
lowercase_ :List[str] = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCamelCase ( self ):
lowercase_ :List[str] = BasicTokenizer(do_lower_case=UpperCamelCase_ , strip_accents=UpperCamelCase_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = BasicTokenizer(do_lower_case=UpperCamelCase_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
lowercase_ :Optional[Any] = {}
for i, token in enumerate(UpperCamelCase_ ):
lowercase_ :Optional[Any] = i
lowercase_ :int = WordpieceTokenizer(vocab=UpperCamelCase_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
lowercase_ :Dict = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
lowercase_ :str = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
lowercase_ :List[Any] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''pt''' )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
lowercase_ :List[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def UpperCamelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCamelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCamelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def UpperCamelCase ( self ):
lowercase_ :Any = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
lowercase_ :Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=UpperCamelCase_ )
lowercase_ :Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=UpperCamelCase_ )
lowercase_ :Union[str, Any] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ )
lowercase_ :Any = tokenizer.build_inputs_with_special_tokens(UpperCamelCase_ , UpperCamelCase_ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
| 252 |
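# A compact sketch (illustrative, not the library implementation) of the greedy
# longest-match-first WordPiece algorithm the tests above exercise: repeatedly
# take the longest vocabulary prefix, prefixing word-internal pieces with "##",
# and fall back to the unknown token if any position fails.
def wordpiece(word, vocab, unk="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece
            if piece in vocab:
                cur = piece
                break
            end -= 1
        if cur is None:
            return [unk]  # the whole word becomes [UNK], as the tests expect
        pieces.append(cur)
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing"}
assert wordpiece("unwanted", vocab) == ["un", "##want", "##ed"]
assert wordpiece("unwantedX", vocab) == ["[UNK]"]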
from itertools import count
def UpperCamelCase ( _a = 5_0 ) -> int:
'''simple docstring'''
lowercase_ :Dict = [1] * min_block_length
for n in count(_a ):
fill_count_functions.append(1 )
for block_length in range(_a , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(f"{solution() = }")
| 252 | 1 |
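# A recursive restatement (illustrative) of the recurrence the loop above builds
# bottom-up: the number of ways to fill a row of length n with blocks of length
# >= m, where blocks are separated by at least one empty cell and the all-empty
# row counts. Checked against the worked example F(3, 7) = 17 from Project Euler 114.
from functools import lru_cache

@lru_cache(maxsize=None)
def fill_count(m, n):
    if n < m:
        return 1  # only the all-empty row fits
    total = 1  # the all-empty row
    for block_length in range(m, n + 1):
        for block_start in range(0, n - block_length + 1):
            # place the leftmost block here, then fill what remains after the
            # block plus one mandatory gap cell
            total += fill_count(m, n - block_start - block_length - 1)
    return total

assert fill_count(3, 7) == 17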
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE( A__ , A__ , A__ ):
"""simple docstring"""
@register_to_config
def __init__( self : str , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : float , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : int , __snake_case : str , __snake_case : bool = False , ) -> int:
super().__init__()
UpperCAmelCase : Any = nn.Embedding(__snake_case , __snake_case )
UpperCAmelCase : Tuple = nn.Embedding(__snake_case , __snake_case )
UpperCAmelCase : List[Any] = False
UpperCAmelCase : Dict = nn.Dropout(p=__snake_case )
UpperCAmelCase : str = TaConfig(
vocab_size=__snake_case , d_model=__snake_case , num_heads=__snake_case , d_kv=__snake_case , d_ff=__snake_case , dropout_rate=__snake_case , feed_forward_proj=__snake_case , is_decoder=__snake_case , is_encoder_decoder=__snake_case , )
UpperCAmelCase : List[str] = nn.ModuleList()
for lyr_num in range(__snake_case ):
UpperCAmelCase : int = TaBlock(__snake_case )
self.encoders.append(__snake_case )
UpperCAmelCase : Dict = TaLayerNorm(__snake_case )
UpperCAmelCase : Dict = nn.Dropout(p=__snake_case )
def A ( self : Union[str, Any] , __snake_case : List[Any] , __snake_case : str ) -> Dict:
UpperCAmelCase : Dict = self.token_embedder(__snake_case )
UpperCAmelCase : Tuple = encoder_input_tokens.shape[1]
UpperCAmelCase : Union[str, Any] = torch.arange(__snake_case , device=encoder_input_tokens.device )
x += self.position_encoding(__snake_case )
UpperCAmelCase : int = self.dropout_pre(__snake_case )
        # invert the attention mask
UpperCAmelCase : List[str] = encoder_input_tokens.size()
UpperCAmelCase : str = self.get_extended_attention_mask(__snake_case , __snake_case )
for lyr in self.encoders:
UpperCAmelCase : Dict = lyr(__snake_case , __snake_case )[0]
UpperCAmelCase : Optional[int] = self.layer_norm(__snake_case )
return self.dropout_post(__snake_case ), encoder_inputs_mask
| 23 |
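# A small sketch (conceptual, not the transformers implementation) of what
# get_extended_attention_mask does in the forward pass above: broadcast a
# (batch, seq) padding mask to (batch, 1, 1, seq) and turn masked positions into
# a large negative additive bias for the attention logits.
import torch

def extended_attention_mask(mask, dtype=torch.float32):
    ext = mask[:, None, None, :].to(dtype)       # (batch, 1, 1, seq)
    return (1.0 - ext) * torch.finfo(dtype).min  # 0 where attended, huge negative where masked

mask = torch.tensor([[1, 1, 1, 0]])
bias = extended_attention_mask(mask)
assert bias.shape == (1, 1, 1, 4)
assert bias[0, 0, 0, 0].item() == 0.0 and bias[0, 0, 0, -1].item() < -1e30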
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
UpperCAmelCase__ = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
UpperCAmelCase__ = TaTokenizerFast
UpperCAmelCase__ = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
UpperCAmelCase__ = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
| 339 | 0 |
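# A stripped-down sketch (illustrative, not transformers' _LazyModule) of the
# lazy-import pattern the __init__ above relies on: inside a package's
# __init__.py, a module-level __getattr__ (PEP 562) defers the real import until
# a symbol is first accessed, so `import package` stays cheap. The submodule
# name below is hypothetical.
import importlib

_import_structure = {"tools": ["dumps", "loads"]}  # hypothetical submodule -> symbols
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")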
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCAmelCase__ = 'pytorch_model.bin'
lowerCAmelCase__ = 'pytorch_model.bin.index.json'
lowerCAmelCase__ = 'adapter_config.json'
lowerCAmelCase__ = 'adapter_model.bin'
lowerCAmelCase__ = 'adapter_model.safetensors'
lowerCAmelCase__ = 'tf_model.h5'
lowerCAmelCase__ = 'tf_model.h5.index.json'
lowerCAmelCase__ = 'model.ckpt'
lowerCAmelCase__ = 'flax_model.msgpack'
lowerCAmelCase__ = 'flax_model.msgpack.index.json'
lowerCAmelCase__ = 'model.safetensors'
lowerCAmelCase__ = 'model.safetensors.index.json'
lowerCAmelCase__ = 'config.json'
lowerCAmelCase__ = 'preprocessor_config.json'
lowerCAmelCase__ = FEATURE_EXTRACTOR_NAME
lowerCAmelCase__ = 'generation_config.json'
lowerCAmelCase__ = 'modelcard.json'
lowerCAmelCase__ = '▁'
lowerCAmelCase__ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCAmelCase__ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCAmelCase__ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCAmelCase__ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def __lowerCamelCase ( lowerCAmelCase__ ):
    if version.parse(__version__ ) < version.parse(lowerCAmelCase__ ):
if "dev" in min_version:
lowerCAmelCase__ = (
'This example requires a source install from HuggingFace Transformers (see '
'`https://huggingface.co/docs/transformers/installation#install-from-source`),'
)
else:
lowerCAmelCase__ = F"""This example requires a minimum version of {min_version},"""
error_message += F""" but the version found is {__version__}.\n"""
raise ImportError(
error_message
+ 'Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other '
'versions of HuggingFace Transformers.' )
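# A quick self-contained illustration (not from the file above) of the comparison
# the version check relies on: packaging.version orders dev releases and
# multi-digit components correctly, where a plain string comparison would not.
from packaging import version

assert version.parse("4.10.0.dev0") < version.parse("4.10.0")
assert version.parse("4.9.2") < version.parse("4.10.0")  # string comparison would get this wrong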
| 119 |
def __lowerCamelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = len(lowerCAmelCase__ )
for i in range(lowerCAmelCase__ ):
for j in range(i + 1 , lowerCAmelCase__ ):
if numbers[j] < numbers[i]:
lowerCAmelCase__ , lowerCAmelCase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCAmelCase__ = input('Enter numbers separated by a comma:\n').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 119 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 341 |
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE = 10**9 ):
_snake_case = 1
_snake_case = 2
_snake_case = 0
_snake_case = 0
_snake_case = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
_snake_case = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 341 | 1 |
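# An independent brute-force cross-check (illustrative) of the recurrence above
# for small limits: an "almost equilateral" triangle has sides (a, a, a±1), and
# its area is integral exactly when 4*a**2 - b**2 is a perfect square r**2 with
# b*r divisible by 4, which can be tested with exact integer arithmetic.
from math import isqrt

def small_perimeters(limit):
    found = []
    for a in range(2, limit // 3 + 2):
        for b in (a - 1, a + 1):
            d = 4 * a * a - b * b
            r = isqrt(d)
            if r * r == d and (b * r) % 4 == 0 and 2 * a + b <= limit:
                found.append(2 * a + b)
    return sorted(found)

assert small_perimeters(1_000)[:3] == [16, 50, 196]  # triangles (5,5,6), (17,17,16), (65,65,66)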
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase: Tuple = {'configuration_ibert': ['IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'IBertConfig', 'IBertOnnxConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase: Tuple = [
'IBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'IBertForMaskedLM',
'IBertForMultipleChoice',
'IBertForQuestionAnswering',
'IBertForSequenceClassification',
'IBertForTokenClassification',
'IBertModel',
'IBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ibert import (
IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
IBertForMaskedLM,
IBertForMultipleChoice,
IBertForQuestionAnswering,
IBertForSequenceClassification,
IBertForTokenClassification,
IBertModel,
IBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase: str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 96 |
'''simple docstring'''
from math import factorial, pi
def lowerCamelCase__ ( _A , _A = 30 ):
if not isinstance(_A , (int, float) ):
raise ValueError('maclaurin_sin() requires either an int or float for theta' )
if not isinstance(_A , _A ) or accuracy <= 0:
raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
a : Dict = float(_A )
a : List[Any] = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(_A ) )
def lowerCamelCase__ ( _A , _A = 30 ):
if not isinstance(_A , (int, float) ):
raise ValueError('maclaurin_cos() requires either an int or float for theta' )
if not isinstance(_A , _A ) or accuracy <= 0:
raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
a : int = float(_A )
a : str = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(_A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
    print(maclaurin_cos(-1_0, 1_5))
| 96 | 1 |
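# A quick numerical sanity check (illustrative, self-contained) of the truncated
# Maclaurin approach above: after the same reduction of theta modulo 2*pi, 30
# terms already agree with math.sin to double precision.
import math
from math import factorial, pi

def maclaurin_sin_check(theta, accuracy=30):
    theta -= 2 * (theta // (2 * pi)) * pi  # same angle reduction as above
    return sum((-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))

for t in (-10.0, -1.0, 0.0, 0.5, 3.0, 10.0):
    assert math.isclose(maclaurin_sin_check(t), math.sin(t), abs_tol=1e-9)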
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_a = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ['ConvNextFeatureExtractor']
_a = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 61 |
def SCREAMING_SNAKE_CASE__ ( ) -> list[list[int]]:
return [list(range(1000 - i ,-1000 - i ,-1 ) ) for i in range(1000 )]
lowerCamelCase : List[Any] = generate_large_matrix()
lowerCamelCase : Optional[int] = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> None:
assert all(row == sorted(lowercase ,reverse=lowercase ) for row in grid )
assert all(list(lowercase ) == sorted(lowercase ,reverse=lowercase ) for col in zip(*lowercase ) )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Tuple = 0
snake_case : List[Any] = len(lowercase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
snake_case : Tuple = (left + right) // 2
snake_case : Dict = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
snake_case : List[Any] = mid + 1
else:
snake_case : str = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(lowercase )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Union[str, Any] = 0
snake_case : Dict = len(grid[0] )
for i in range(len(lowercase ) ):
snake_case : Tuple = find_negative_index(grid[i][:bound] )
total += bound
return (len(lowercase ) * len(grid[0] )) - total
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
return len([number for row in grid for number in row if number < 0] )
def SCREAMING_SNAKE_CASE__ ( lowercase ) -> int:
snake_case : Dict = 0
for row in grid:
for i, number in enumerate(lowercase ):
if number < 0:
total += len(lowercase ) - i
break
return total
def SCREAMING_SNAKE_CASE__ ( ) -> None:
from timeit import timeit
print("""Running benchmarks""" )
snake_case : List[Any] = (
"""from __main__ import count_negatives_binary_search, """
"""count_negatives_brute_force, count_negatives_brute_force_with_break, grid"""
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
snake_case : int = timeit(f"""{func}(grid=grid)""" ,setup=lowercase ,number=500 )
print(f"""{func}() took {time:0.4f} seconds""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 124 | 0 |
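# A tiny self-contained restatement (illustrative) of the row-wise binary search
# above, using bisect on the negated row: each row is sorted non-increasing, so
# its negation is non-decreasing and bisect_right(..., 0) finds the index of the
# first negative entry.
from bisect import bisect_right

def count_negatives_check(grid):
    total = 0
    for row in grid:
        neg = [-x for x in row]                # non-decreasing
        first_negative = bisect_right(neg, 0)  # first position where -x > 0, i.e. x < 0
        total += len(row) - first_negative
    return total

grid_example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert count_negatives_check(grid_example) == 8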
"""simple docstring"""
def _snake_case ( lowercase__ : int = 5_0_0_0_0_0_0_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = set()
lowerCAmelCase_ :Union[str, Any] = int((limit - 2_4) ** (1 / 2) )
lowerCAmelCase_ :Any = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , lowercase__ ) ) )
for primea in primes:
lowerCAmelCase_ :int = primea * primea
for primea in primes:
lowerCAmelCase_ :Any = primea * primea * primea
if square + cube >= limit - 1_6:
break
for primea in primes:
lowerCAmelCase_ :Optional[Any] = primea * primea * primea * primea
lowerCAmelCase_ :Tuple = square + cube + tetr
if total >= limit:
break
ret.add(lowercase__ )
return len(lowercase__ )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 1 |
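# An independent brute-force check (illustrative, self-contained) of the
# prime-power-triple count above: below 50 exactly four numbers are expressible
# as p**2 + q**3 + r**4 with p, q, r prime, namely 28, 33, 47 and 49 (the worked
# example from Project Euler 87).
def primes_below(n):
    sieve = [True] * n
    sieve[0] = sieve[1] = False
    for i in range(2, int(n ** 0.5) + 1):
        if sieve[i]:
            sieve[i * i :: i] = [False] * len(sieve[i * i :: i])
    return [i for i, is_prime in enumerate(sieve) if is_prime]

ps = primes_below(50)
hits = {
    p ** 2 + q ** 3 + r ** 4
    for p in ps
    for q in ps
    for r in ps
    if p ** 2 + q ** 3 + r ** 4 < 50
}
assert sorted(hits) == [28, 33, 47, 49]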
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
UpperCAmelCase_ :List[str] = "CIDAS/clipseg-rd64-refined"
UpperCAmelCase_ :List[Any] = "image_segmenter"
UpperCAmelCase_ :Optional[int] = CLIPSegForImageSegmentation
UpperCAmelCase_ :Tuple = ["image", "text"]
UpperCAmelCase_ :Dict = ["image"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ["""vision"""] )
super().__init__(*__A , **__A )
def __lowerCAmelCase ( self , __A , __A ) -> Any:
return self.pre_processor(text=[label] , images=[image] , padding=__A , return_tensors="""pt""" )
def __lowerCAmelCase ( self , __A ) -> Tuple:
with torch.no_grad():
lowerCAmelCase_ :Dict = self.model(**__A ).logits
return logits
def __lowerCAmelCase ( self , __A ) -> Tuple:
lowerCAmelCase_ :Optional[int] = outputs.cpu().detach().numpy()
lowerCAmelCase_ :List[str] = 0
lowerCAmelCase_ :str = 1
return Image.fromarray((array * 255).astype(np.uinta ) )
| 1 | 1 |
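# A minimal sketch (illustrative, not the tool's exact decode step) of turning
# per-pixel segmentation logits into a binary mask image: threshold at 0, then
# render the 0/1 mask as an 8-bit grayscale image.
import numpy as np
from PIL import Image

logits = np.array([[-2.0, 0.5], [1.5, -0.1]], dtype=np.float32)  # toy 2x2 "model output"
mask = (logits > 0).astype(np.uint8)  # 1 where the label is detected, else 0
img = Image.fromarray(mask * 255)
assert img.size == (2, 2) and mask.tolist() == [[0, 1], [1, 0]]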
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class UpperCamelCase__ :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=100 , UpperCamelCase__=13 , UpperCamelCase__=30 , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=4 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__=3 , UpperCamelCase__=None , UpperCamelCase__=[0, 1, 2, 3] , ) -> List[Any]:
lowerCamelCase : Optional[Any] = parent
lowerCamelCase : str = 100
lowerCamelCase : int = batch_size
lowerCamelCase : Tuple = image_size
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Dict = num_channels
lowerCamelCase : Optional[int] = is_training
lowerCamelCase : int = use_labels
lowerCamelCase : Optional[int] = hidden_size
lowerCamelCase : Optional[int] = num_hidden_layers
lowerCamelCase : List[Any] = num_attention_heads
lowerCamelCase : Any = intermediate_size
lowerCamelCase : List[Any] = hidden_act
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Tuple = attention_probs_dropout_prob
lowerCamelCase : Dict = type_sequence_label_size
lowerCamelCase : int = initializer_range
lowerCamelCase : Optional[int] = scope
lowerCamelCase : Tuple = out_indices
lowerCamelCase : Dict = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase : List[str] = (image_size // patch_size) ** 2
lowerCamelCase : Optional[Any] = num_patches + 1
def _lowercase ( self ) -> List[Any]:
lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Optional[int] = None
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase : List[str] = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase ( self ) -> str:
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
lowerCamelCase : Dict = BeitModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase : Optional[Any] = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
lowerCamelCase : int = BeitForMaskedImageModeling(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
lowerCamelCase : str = self.type_sequence_label_size
lowerCamelCase : Optional[Any] = BeitForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase : List[Any] = 1
lowerCamelCase : Any = BeitForImageClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : Any = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
lowerCamelCase : Optional[int] = self.num_labels
lowerCamelCase : str = BeitForSemanticSegmentation(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCamelCase : Tuple = model(UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowerCamelCase : Optional[int] = model(UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : int = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = config_and_inputs
lowerCamelCase : List[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ (lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Optional[int] = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCamelCase_ : int = (
{
"""feature-extraction""": BeitModel,
"""image-classification""": BeitForImageClassification,
"""image-segmentation""": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase_ : Tuple = False
lowerCamelCase_ : List[str] = False
lowerCamelCase_ : str = False
def _lowercase ( self ) -> List[str]:
lowerCamelCase : Dict = BeitModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _lowercase ( self ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def _lowercase ( self ) -> str:
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _lowercase ( self ) -> Any:
pass
def _lowercase ( self ) -> str:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowercase ( self ) -> Tuple:
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Optional[Any] = model_class(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : str = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _lowercase ( self ) -> int:
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowercase ( self ) -> Any:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )
def _lowercase ( self ) -> List[str]:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
def _lowercase ( self ) -> Optional[Any]:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase__ )
def _lowercase ( self ) -> Tuple:
if not self.model_tester.is_training:
return
lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Tuple = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling]:
continue
lowerCamelCase : int = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
lowerCamelCase : Dict = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Any = model(**UpperCamelCase__ ).loss
loss.backward()
def _lowercase ( self ) -> List[str]:
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowerCamelCase : Optional[Any] = False
lowerCamelCase : str = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(UpperCamelCase__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowerCamelCase : Dict = model_class(UpperCamelCase__ )
model.gradient_checkpointing_enable()
model.to(UpperCamelCase__ )
model.train()
lowerCamelCase : Union[str, Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
lowerCamelCase : Optional[int] = model(**UpperCamelCase__ ).loss
loss.backward()
def _lowercase ( self ) -> int:
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : str = _config_zero_init(UpperCamelCase__ )
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(config=UpperCamelCase__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def _lowercase ( self ) -> List[str]:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Optional[Any] = BeitModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def A ( ) -> Tuple:
lowerCamelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self ) -> Tuple:
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def _lowercase ( self ) -> str:
lowerCamelCase : List[str] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(UpperCamelCase__ )
lowerCamelCase : Any = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : str = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).pixel_values.to(UpperCamelCase__ )
# prepare bool_masked_pos
lowerCamelCase : List[Any] = torch.ones((1, 196) , dtype=torch.bool ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Dict = model(pixel_values=UpperCamelCase__ , bool_masked_pos=UpperCamelCase__ )
lowerCamelCase : List[str] = outputs.logits
# verify the logits
lowerCamelCase : Dict = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCamelCase : int = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase__ , atol=1e-2 ) )
@slow
def _lowercase ( self ) -> Dict:
lowerCamelCase : int = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(UpperCamelCase__ )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Optional[Any] = prepare_img()
lowerCamelCase : int = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase : List[str] = model(**UpperCamelCase__ )
lowerCamelCase : Optional[Any] = outputs.logits
# verify the logits
lowerCamelCase : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCamelCase : str = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
lowerCamelCase : List[str] = 281
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ )
@slow
def _lowercase ( self ) -> str:
lowerCamelCase : Union[str, Any] = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
UpperCamelCase__ )
lowerCamelCase : int = self.default_image_processor
lowerCamelCase : Tuple = prepare_img()
lowerCamelCase : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**UpperCamelCase__ )
lowerCamelCase : Tuple = outputs.logits
# verify the logits
lowerCamelCase : int = torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCamelCase : Any = torch.tensor([1.6881, -0.2787, 0.5901] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1e-4 ) )
lowerCamelCase : str = 2396
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase__ )
@slow
def _lowercase ( self ) -> int:
lowerCamelCase : int = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
lowerCamelCase : str = model.to(UpperCamelCase__ )
lowerCamelCase : Tuple = BeitImageProcessor(do_resize=UpperCamelCase__ , size=640 , do_center_crop=UpperCamelCase__ )
lowerCamelCase : Optional[int] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowerCamelCase : str = Image.open(ds[0]["file"] )
lowerCamelCase : List[Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**UpperCamelCase__ )
lowerCamelCase : Optional[Any] = outputs.logits
# verify the logits
lowerCamelCase : List[str] = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , UpperCamelCase__ )
lowerCamelCase : Any = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_a:
lowerCamelCase : List[str] = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=UpperCamelCase__ , )
else:
lowerCamelCase : str = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=UpperCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
@slow
def _lowercase ( self ) -> List[str]:
lowerCamelCase : List[Any] = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
lowerCamelCase : Optional[int] = model.to(UpperCamelCase__ )
lowerCamelCase : int = BeitImageProcessor(do_resize=UpperCamelCase__ , size=640 , do_center_crop=UpperCamelCase__ )
lowerCamelCase : List[Any] = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
lowerCamelCase : str = Image.open(ds[0]["file"] )
lowerCamelCase : Any = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCamelCase : Dict = model(**UpperCamelCase__ )
lowerCamelCase : Optional[int] = outputs.logits.detach().cpu()
lowerCamelCase : int = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
lowerCamelCase : int = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
lowerCamelCase : List[str] = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
lowerCamelCase : Optional[Any] = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 48 |
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
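# Illustration of the fused-qkv layout assumed above (e.g. a stage with dim = 96):
# "attn.w_msa.qkv.weight" has shape (3 * 96, 96); rows [0:96] hold the query
# projection, rows [96:192] the key projection, and rows [192:288] the value
# projection, which is exactly how read_in_q_k_v slices it.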
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
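# The mmsegmentation checkpoints appear to store the patch-merging ("downsample")
# reduction and norm weights in an "unfolded" channel order; the reverse_* helpers
# above apply the inverse permutation so the tensors match the channel order the
# Hugging Face Swin implementation expects.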
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f'upernet-swin-{size}' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = parent
__lowercase : int = batch_size
__lowercase : Any = seq_length
__lowercase : str = is_training
__lowercase : str = use_input_mask
__lowercase : Optional[int] = use_token_type_ids
__lowercase : List[Any] = use_labels
__lowercase : Optional[Any] = vocab_size
__lowercase : int = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Dict = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : List[str] = max_position_embeddings
__lowercase : Union[str, Any] = type_vocab_size
__lowercase : Dict = type_sequence_label_size
__lowercase : Union[str, Any] = initializer_range
__lowercase : List[Any] = num_labels
__lowercase : str = num_choices
__lowercase : Tuple = scope
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : str = None
__lowercase : Optional[Any] = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
__lowercase : str = model(__a , attention_mask=__a )
__lowercase : List[Any] = model(__a )
__lowercase : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
__lowercase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Any = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = False
_A : Any = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Optional[Any] = ()
_A : List[Any] = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Optional[Any] = True
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = EsmModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : List[str] = EsmEmbeddings(config=__a )
__lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
__lowercase : int = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : Optional[Any] = EsmEmbeddings(config=__a )
__lowercase : Optional[int] = torch.empty(2 , 4 , 30 )
__lowercase : Tuple = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
__lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@require_torch
class lowerCAmelCase ( __a ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
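# Each block below follows the same optional-dependency pattern: probe for the
# backend, and fall back to the dummy placeholder objects (which raise a helpful
# error on first use) when the backend is not installed.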
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
def method_2(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f"y = {y}")


if __name__ == "__main__":
    main()
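# Quick sanity check: with f(x) = x * x on [0, 1] the exact integral is 1/3.
# Note that the strict bound `x < (b - h)` in make_points means the final
# interior point b - h is never yielded, so the printed estimate sits slightly
# below the textbook composite trapezoidal value.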
"""simple docstring"""
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring-divisibility property from Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Sum all 0-9 pandigital numbers with the substring-divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
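# For reference, the published answer to Project Euler problem 43 is
# 16695334890; solution() reproduces it by brute force over all 10! = 3,628,800
# permutations of the digits 0-9.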
if __name__ == "__main__":
print(f"""{solution() = }""")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
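# Map of submodule name -> public names it exports. _LazyModule consumes this so
# that heavy optional backends (tokenizers, torch) are only imported on first
# attribute access rather than at package import time.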
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mvp"] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class _a :
A = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Leave None if you want to train a model from'''
''' scratch.'''
)
} , )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(_lowerCAmelCase )} , )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
@dataclass
class _a :
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''The input training data file (a text file).'''} )
A = field(
default=_lowerCAmelCase , metadata={
'''help''': (
'''The input training data files (multiple files in glob format). '''
'''Very often splitting large files to smaller files can prevent tokenizer going out of memory'''
)
} , )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} )
A = field(default=_lowerCAmelCase , metadata={'''help''': '''Whether ot not to use whole word mask.'''} )
A = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
A = field(
default=1 / 6 , metadata={
'''help''': (
'''Ratio of length of a span of masked tokens to surrounding context length for permutation language'''
''' modeling.'''
)
} , )
A = field(
default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} )
A = field(
default=-1 , metadata={
'''help''': (
'''Optional input sequence length after tokenization.'''
'''The training dataset will be truncated in block of this size for training.'''
'''Default to the model max input length for single sentence inputs (take into account special tokens).'''
)
} , )
A = field(
default=_lowerCAmelCase , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    # See all possible arguments in src/transformers/training_args.py, or by
    # passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"""Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file """
"""or remove the --do_eval argument.""" )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase__ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
UpperCAmelCase_: Dict = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_: Any = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
UpperCAmelCase_: int = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.tokenizer_name:
UpperCAmelCase_: Optional[int] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
UpperCAmelCase_: List[str] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"""
""" script, save it,and load it from here, using --tokenizer_name""" )
if model_args.model_name_or_path:
UpperCAmelCase_: int = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , )
else:
logger.info("""Training new model from scratch""" )
UpperCAmelCase_: Union[str, Any] = AutoModelWithLMHead.from_config(lowerCAmelCase__ )
model.resize_token_embeddings(len(lowerCAmelCase__ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"""BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"""
"""--mlm flag (masked language modeling).""" )
if data_args.block_size <= 0:
UpperCAmelCase_: List[str] = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
UpperCAmelCase_: Any = min(data_args.block_size , tokenizer.max_len )
# Get datasets
UpperCAmelCase_: str = (
get_dataset(lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
UpperCAmelCase_: List[Any] = (
get_dataset(lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , evaluate=lowerCAmelCase__ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
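    # XLNet is trained with permutation language modeling, so it needs its own
    # data collator; for BERT-style models the branch below chooses between
    # whole-word masking and standard token-level masking.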
if config.model_type == "xlnet":
UpperCAmelCase_: Dict = DataCollatorForPermutationLanguageModeling(
tokenizer=lowerCAmelCase__ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
UpperCAmelCase_: str = DataCollatorForWholeWordMask(
tokenizer=lowerCAmelCase__ , mlm_probability=data_args.mlm_probability )
else:
UpperCAmelCase_: Optional[int] = DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase__ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
UpperCAmelCase_: Union[str, Any] = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , train_dataset=lowerCAmelCase__ , eval_dataset=lowerCAmelCase__ , prediction_loss_only=lowerCAmelCase__ , )
# Training
if training_args.do_train:
UpperCAmelCase_: Dict = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=lowerCAmelCase__ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
UpperCAmelCase_: Union[str, Any] = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
UpperCAmelCase_: List[Any] = trainer.evaluate()
UpperCAmelCase_: Optional[Any] = math.exp(eval_output["""eval_loss"""] )
UpperCAmelCase_: Optional[Any] = {"""perplexity""": perplexity}
UpperCAmelCase_: Any = os.path.join(training_args.output_dir , """eval_results_lm.txt""" )
if trainer.is_world_master():
with open(lowerCAmelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key in sorted(result.keys() ):
logger.info(""" %s = %s""" , lowerCAmelCase__ , str(result[key] ) )
writer.write("""%s = %s\n""" % (key, str(result[key] )) )
results.update(lowerCAmelCase__ )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        # map the model's [-1, 1] output range back to [0, 1] for image conversion
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
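# Minimal usage sketch (assumes a checkpoint in the standard diffusers layout;
# "google/ddpm-cifar10-32" is one commonly used example id):
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(num_inference_steps=50, eta=0.0).images[0]
#     image.save("sample.png")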
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclidean algorithm, recursive form."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    """Euclidean algorithm, iterative form."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
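# Worked example: gcd_by_iterative(120, 84) steps through
# (120, 84) -> (84, 36) -> (36, 12) -> (12, 0) and returns 12.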
def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)

        processor.save_pretrained(self.tmpdirname)
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Tuple ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def __lowerCAmelCase ( self : int , **__UpperCAmelCase : str ) ->List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def __lowerCAmelCase ( self : Optional[Any] , **__UpperCAmelCase : Any ) ->Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).qformer_tokenizer
def __lowerCAmelCase ( self : str ) ->Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
return image_inputs
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
a = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
a = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
a = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer , __UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = self.prepare_image_inputs()
a = image_processor(__UpperCAmelCase , return_tensors='''np''' )
a = processor(images=__UpperCAmelCase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = processor(text=__UpperCAmelCase )
a = tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
a = qformer_tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor['''qformer_''' + key] )
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def __lowerCAmelCase ( self : Dict ) ->List[Any]:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a = processor.batch_decode(__UpperCAmelCase )
a = tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def __lowerCAmelCase ( self : Union[str, Any] ) ->str:
"""simple docstring"""
a = self.get_image_processor()
a = self.get_tokenizer()
a = self.get_qformer_tokenizer()
a = InstructBlipProcessor(
tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase , qformer_tokenizer=__UpperCAmelCase )
a = '''lower newer'''
a = self.prepare_image_inputs()
a = processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(
list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] , )
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : List[Any] , _A : Dict , _A : int=13 , _A : Any=30 , _A : List[str]=2 , _A : Any=3 , _A : List[Any]=True , _A : Tuple=True , _A : Union[str, Any]=32 , _A : Tuple=2 , _A : str=4 , _A : Optional[int]=37 , _A : Optional[int]="gelu" , _A : Dict=0.1 , _A : Union[str, Any]=0.1 , _A : str=10 , _A : Dict=0.02 , _A : Optional[int]=3 , _A : Any=None , ) -> Tuple:
__magic_name__ : List[str] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = image_size
__magic_name__ : List[Any] = patch_size
__magic_name__ : Optional[Any] = num_channels
__magic_name__ : List[Any] = is_training
__magic_name__ : Tuple = use_labels
__magic_name__ : Tuple = hidden_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : int = num_attention_heads
__magic_name__ : int = intermediate_size
__magic_name__ : int = hidden_act
__magic_name__ : Any = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : Dict = type_sequence_label_size
__magic_name__ : str = initializer_range
__magic_name__ : List[str] = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ : Optional[int] = (image_size // patch_size) ** 2
__magic_name__ : Any = num_patches + 1
def __lowerCAmelCase ( self : Tuple ) -> Any:
__magic_name__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Optional[int] = None
if self.use_labels:
__magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Tuple = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : Any ) -> Dict:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any] , _A : Dict , _A : List[str] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = TFViTModel(config=_A )
__magic_name__ : Optional[Any] = model(_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
__magic_name__ : str = self.image_size // 2
__magic_name__ : Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
__magic_name__ : Optional[Any] = model(_A , interpolate_pos_encoding=_A , training=_A )
__magic_name__ : Tuple = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : List[str] , _A : int ) -> int:
__magic_name__ : Optional[Any] = self.type_sequence_label_size
__magic_name__ : Tuple = TFViTForImageClassification(_A )
__magic_name__ : Dict = model(_A , labels=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
__magic_name__ : List[Any] = self.image_size // 2
__magic_name__ : Union[str, Any] = pixel_values[:, :, :image_size, :image_size]
__magic_name__ : Tuple = model(_A , interpolate_pos_encoding=_A , training=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : List[Any] = 1
__magic_name__ : Any = TFViTForImageClassification(_A )
__magic_name__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : Optional[int] ) -> Any:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : str = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
A_ : Any = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
A_ : Union[str, Any] = False
A_ : Optional[Any] = False
A_ : str = False
def __lowerCAmelCase ( self : Optional[Any] ) -> int:
__magic_name__ : Optional[Any] = TFViTModelTester(self )
__magic_name__ : Optional[int] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def __lowerCAmelCase ( self : List[str] ) -> Any:
pass
def __lowerCAmelCase ( self : int ) -> Optional[int]:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
def __lowerCAmelCase ( self : Dict ) -> int:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : List[str] = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(_A )
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __lowerCAmelCase ( self : int ) -> Dict:
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
__magic_name__ : List[str] = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
__magic_name__ : str = self.default_image_processor
__magic_name__ : Optional[Any] = prepare_img()
__magic_name__ : str = image_processor(images=_A , return_tensors='tf' )
# forward pass
__magic_name__ : Dict = model(**_A )
# verify the logits
__magic_name__ : Optional[Any] = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _A )
__magic_name__ : Optional[int] = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , _A , atol=1E-4 ) | 331 |
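# Related sketch for the `interpolate_pos_encoding` checks above: how ViT-style position
# embeddings can be resampled for a different input resolution. The names and the bicubic
# choice here are assumptions for illustration, not the actual TFViT implementation.
import tensorflow as tf

def interpolate_patch_pos_embed(pos_embed, new_grid, dim):
    # pos_embed: (1, 1 + old_grid**2, dim) -> a CLS embedding plus a square patch grid
    cls_embed, patch_embed = pos_embed[:, :1], pos_embed[:, 1:]
    old_grid = int(float(patch_embed.shape[1]) ** 0.5)
    patch_embed = tf.reshape(patch_embed, (1, old_grid, old_grid, dim))
    patch_embed = tf.image.resize(patch_embed, (new_grid, new_grid), method="bicubic")
    patch_embed = tf.reshape(patch_embed, (1, new_grid * new_grid, dim))
    return tf.concat([cls_embed, patch_embed], axis=1)

pe = tf.random.normal((1, 1 + 14 * 14, 32))
assert interpolate_patch_pos_embed(pe, 7, 32).shape == (1, 1 + 7 * 7, 32)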
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Dict = ["""flax""", """transformers"""]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""flax""", """transformers"""]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] ) | 331 | 1 |
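# Minimal sketch of the dummy-object pattern above: `requires_backends` (simplified here
# as an assumption, not the exact transformers implementation) raises an ImportError
# naming every missing backend, so a Flax class fails loudly at instantiation time
# rather than silently at import time when `flax` is not installed.
import importlib.util

def requires_backends_sketch(obj, backends):
    name = obj.__name__ if isinstance(obj, type) else obj.__class__.__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")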
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=99 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=512 , lowerCAmelCase__=16 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Tuple:
a : Any = parent
a : Union[str, Any] = 13
a : Optional[int] = 7
a : List[Any] = True
a : Tuple = True
a : Optional[int] = True
a : Tuple = True
a : List[Any] = 99
a : Union[str, Any] = 32
a : Optional[int] = 2
a : Union[str, Any] = 4
a : List[Any] = 37
a : List[Any] = "gelu"
a : Union[str, Any] = 0.1
a : Tuple = 0.1
a : Optional[int] = 512
a : Dict = 16
a : str = 2
a : List[str] = 0.02
a : List[Any] = 3
a : Any = 4
a : Optional[Any] = None
def __a ( self ) -> Tuple:
a : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : Tuple = None
if self.use_input_mask:
a : Dict = random_attention_mask([self.batch_size, self.seq_length] )
a : Optional[int] = None
if self.use_token_type_ids:
a : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
a : Union[str, Any] = None
a : str = None
a : str = None
if self.use_labels:
a : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : Any = ids_tensor([self.batch_size] , self.num_choices )
a : Dict = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCAmelCase__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
a : List[Any] = TFRoFormerModel(config=lowerCAmelCase__ )
a : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
a : int = [input_ids, input_mask]
a : str = model(lowerCAmelCase__ )
a : int = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Dict = True
a : Any = TFRoFormerForCausalLM(config=lowerCAmelCase__ )
a : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a : Optional[int] = model(lowerCAmelCase__ )["logits"]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
a : Dict = TFRoFormerForMaskedLM(config=lowerCAmelCase__ )
a : Optional[int] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a : str = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
a : Optional[int] = self.num_labels
a : Any = TFRoFormerForSequenceClassification(config=lowerCAmelCase__ )
a : Tuple = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a : int = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Union[str, Any] = self.num_choices
a : Union[str, Any] = TFRoFormerForMultipleChoice(config=lowerCAmelCase__ )
a : List[str] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
a : Optional[Any] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
a : Union[str, Any] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
a : Any = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
a : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : List[str] = self.num_labels
a : str = TFRoFormerForTokenClassification(config=lowerCAmelCase__ )
a : Union[str, Any] = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a : Optional[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
a : Tuple = TFRoFormerForQuestionAnswering(config=lowerCAmelCase__ )
a : int = {
"input_ids": input_ids,
"attention_mask": input_mask,
"token_type_ids": token_type_ids,
}
a : Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self ) -> List[Any]:
a : Optional[int] = self.prepare_config_and_inputs()
        a , a , a , a , a , a , a = config_and_inputs
a : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : str =(
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase : str =(
{
"""feature-extraction""": TFRoFormerModel,
"""fill-mask""": TFRoFormerForMaskedLM,
"""question-answering""": TFRoFormerForQuestionAnswering,
"""text-classification""": TFRoFormerForSequenceClassification,
"""text-generation""": TFRoFormerForCausalLM,
"""token-classification""": TFRoFormerForTokenClassification,
"""zero-shot""": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase : Any =False
lowerCamelCase : Dict =False
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def __a ( self ) -> Optional[Any]:
a : Any = TFRoFormerModelTester(self )
a : List[str] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def __a ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def __a ( self ) -> Tuple:
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )
def __a ( self ) -> Dict:
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*lowerCAmelCase__ )
def __a ( self ) -> List[Any]:
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__ )
def __a ( self ) -> List[str]:
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
def __a ( self ) -> str:
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
def __a ( self ) -> int:
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
@slow
def __a ( self ) -> str:
a : Union[str, Any] = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" )
self.assertIsNotNone(lowerCAmelCase__ )
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Union[str, Any]:
a : str = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
a : Optional[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
a : Dict = model(lowerCAmelCase__ )[0]
# TODO Replace vocab size
a : int = 5_0000
a : List[str] = [1, 6, vocab_size]
self.assertEqual(output.shape , lowerCAmelCase__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
a : Dict = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
lowerCamelCase : Dict =1e-4
def __a ( self ) -> Tuple:
a : str = tf.constant([[4, 10]] )
a : Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
a : List[Any] = emba(input_ids.shape )
a : str = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , atol=self.tolerance )
def __a ( self ) -> Union[str, Any]:
a : Union[str, Any] = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
a : str = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
a : Tuple = emba.weight[:3, :5]
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , atol=self.tolerance )
@require_tf
class __UpperCamelCase ( unittest.TestCase ):
lowerCamelCase : Tuple =1e-4
def __a ( self ) -> Union[str, Any]:
# 2,12,16,64
        a : List[Any] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        a : Optional[int] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
a : int = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
a : str = embed_positions([2, 16, 768] )[None, None, :, :]
a, a : Tuple = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
a : str = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
a : int = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , lowerCAmelCase__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , lowerCAmelCase__ , atol=self.tolerance )
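# Illustrative sketch of what `apply_rotary_position_embeddings` is verifying above:
# rotary embeddings rotate each (even, odd) feature pair of a query/key vector by a
# position-dependent angle. This NumPy version is a simplified assumption for clarity,
# not the actual TFRoFormerSelfAttention implementation.
import numpy as np

def apply_rotary_sketch(x, sin, cos):
    # x: (..., seq_len, dim); sin/cos: (seq_len, dim // 2), one angle per feature pair
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = x_even * cos - x_odd * sin
    out[..., 1::2] = x_odd * cos + x_even * sin
    return out

pos = np.arange(4)[:, None] / 10000 ** (np.arange(4)[None, :] / 4)
rotated = apply_rotary_sketch(np.ones((1, 4, 8)), np.sin(pos), np.cos(pos))
assert rotated.shape == (1, 4, 8)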
| 79 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Dict = logging.get_logger(__name__)
a : List[Any] = {
'''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}
class __UpperCamelCase ( a__ ):
lowerCamelCase : Tuple ="""open-llama"""
def __init__( self , lowerCAmelCase__=10_0000 , lowerCAmelCase__=4096 , lowerCAmelCase__=1_1008 , lowerCAmelCase__=32 , lowerCAmelCase__=32 , lowerCAmelCase__="silu" , lowerCAmelCase__=2048 , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-6 , lowerCAmelCase__=True , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=2 , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=None , **lowerCAmelCase__ , ) -> Tuple:
a : Any = vocab_size
a : List[str] = max_position_embeddings
a : int = hidden_size
a : str = intermediate_size
a : List[str] = num_hidden_layers
a : int = num_attention_heads
a : Dict = hidden_act
a : Union[str, Any] = initializer_range
a : Tuple = rms_norm_eps
a : Union[str, Any] = use_cache
a : Union[str, Any] = kwargs.pop(
"use_memorry_efficient_attention" , lowerCAmelCase__ )
a : int = hidden_dropout_prob
a : Tuple = attention_dropout_prob
a : Optional[Any] = use_stable_embedding
a : str = shared_input_output_embedding
a : str = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__ , )
def __a ( self ) -> Union[str, Any]:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCAmelCase__ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"""got {self.rope_scaling}""" )
a : Any = self.rope_scaling.get("type" , lowerCAmelCase__ )
a : List[str] = self.rope_scaling.get("factor" , lowerCAmelCase__ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or rope_scaling_factor <= 1.0:
raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 79 | 1 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =params
__UpperCamelCase =np.array(A_ )
__UpperCamelCase =np.array([len(A_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , A_ ) -> List[str]:
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Optional[Any]:
return len(self.lengths )
def _a ( self ) -> Any:
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.params.max_model_input_size
__UpperCamelCase =self.lengths > max_len
logger.info(f'Splitting {sum(A_ )} too long sequences.' )
def divide_chunks(A_ , A_ ):
return [l[i : i + n] for i in range(0 , len(A_ ) , A_ )]
__UpperCamelCase =[]
__UpperCamelCase =[]
if self.params.mlm:
__UpperCamelCase , __UpperCamelCase =self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
__UpperCamelCase , __UpperCamelCase =self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
__UpperCamelCase =[]
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
__UpperCamelCase =np.insert(A_ , 0 , A_ )
if sub_s[-1] != sep_id:
__UpperCamelCase =np.insert(A_ , len(A_ ) , A_ )
assert len(A_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(A_ )
new_tok_ids.extend(A_ )
new_lengths.extend([len(A_ ) for l in sub_seqs] )
__UpperCamelCase =np.array(A_ )
__UpperCamelCase =np.array(A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =len(self )
__UpperCamelCase =self.lengths > 11
__UpperCamelCase =self.token_ids[indices]
__UpperCamelCase =self.lengths[indices]
__UpperCamelCase =len(self )
logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
def _a ( self ) -> int:
if "unk_token" not in self.params.special_tok_ids:
return
else:
__UpperCamelCase =self.params.special_tok_ids['unk_token']
__UpperCamelCase =len(self )
__UpperCamelCase =np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
__UpperCamelCase =(unk_occs / self.lengths) < 0.5
__UpperCamelCase =self.token_ids[indices]
__UpperCamelCase =self.lengths[indices]
__UpperCamelCase =len(self )
        logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (>=50%).' )
def _a ( self ) -> int:
if not self.params.is_master:
return
logger.info(f'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _a ( self , A_ ) -> int:
__UpperCamelCase =[t[0] for t in batch]
__UpperCamelCase =[t[1] for t in batch]
assert len(A_ ) == len(A_ )
# Max for paddings
__UpperCamelCase =max(A_ )
# Pad token ids
if self.params.mlm:
__UpperCamelCase =self.params.special_tok_ids['pad_token']
else:
__UpperCamelCase =self.params.special_tok_ids['unk_token']
__UpperCamelCase =[list(t.astype(A_ ) ) + [pad_idx] * (max_seq_len_ - len(A_ )) for t in token_ids]
assert len(tk_ ) == len(A_ )
assert all(len(A_ ) == max_seq_len_ for t in tk_ )
__UpperCamelCase =torch.tensor(tk_ ) # (bs, max_seq_len_)
__UpperCamelCase =torch.tensor(A_ ) # (bs)
return tk_t, lg_t
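# Standalone sketch of the padding collate above: pad variable-length token sequences to
# the batch maximum and return (token_ids, lengths) tensors. This is a simplified
# assumption of what `batch_sequences` does, with a hard-coded pad id instead of
# `self.params.special_tok_ids`.
import numpy as np
import torch

def pad_batch_sketch(sequences, pad_idx=0):
    lengths = [len(s) for s in sequences]
    max_len = max(lengths)
    padded = [list(s) + [pad_idx] * (max_len - len(s)) for s in sequences]
    return torch.tensor(padded), torch.tensor(lengths)  # (bs, max_len), (bs,)

tokens, lengths = pad_batch_sketch([np.array([5, 6, 7]), np.array([5, 6])])
assert tokens.shape == (2, 3) and lengths.tolist() == [3, 2]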
| 62 |
def _print_dist ( dist , v ) -> None:
    """simple docstring"""
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('''inf''' ):
                print(int(dist[i][j] ) , end='''\t''' )
            else:
                print('''INF''' , end='''\t''' )
        print()
def floyd_warshall ( graph , v ):
    """simple docstring"""
    dist = [[float('''inf''' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('''inf''' )
                    and dist[k][j] != float('''inf''' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
lowerCAmelCase_ = int(input('''Enter number of vertices: '''))
lowerCAmelCase_ = int(input('''Enter number of edges: '''))
lowerCAmelCase_ = [[float('''inf''') for i in range(v)] for j in range(v)]
for i in range(v):
lowerCAmelCase_ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('''\nEdge ''', i + 1)
lowerCAmelCase_ = int(input('''Enter source:'''))
lowerCAmelCase_ = int(input('''Enter destination:'''))
lowerCAmelCase_ = float(input('''Enter weight:'''))
lowerCAmelCase_ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
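# Added demo helper (not part of the original script): runs the documented example above
# without the interactive prompts, using the floyd_warshall function defined earlier.
def _demo() -> None:
    inf = float('''inf''')
    example = [[0.0, inf, inf], [inf, 0.0, 2.0], [inf, 1.0, 0.0]]
    floyd_warshall(example, 3)  # prints the expected 0/INF matrix shown above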
| 279 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , __A , __A=7 , __A=3 , __A=18 , __A=30 , __A=400 , __A=True , __A=None , __A=True , __A=None , __A=True , __A=[0.5, 0.5, 0.5] , __A=[0.5, 0.5, 0.5] , ) -> int:
lowerCAmelCase_ :Optional[Any] = size if size is not None else {"""shortest_edge""": 18}
lowerCAmelCase_ :str = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
lowerCAmelCase_ :Tuple = parent
lowerCAmelCase_ :int = batch_size
lowerCAmelCase_ :Union[str, Any] = num_channels
lowerCAmelCase_ :Dict = image_size
lowerCAmelCase_ :Optional[int] = min_resolution
lowerCAmelCase_ :Union[str, Any] = max_resolution
lowerCAmelCase_ :Any = do_resize
lowerCAmelCase_ :str = size
lowerCAmelCase_ :Tuple = do_center_crop
lowerCAmelCase_ :Union[str, Any] = crop_size
lowerCAmelCase_ :str = do_normalize
lowerCAmelCase_ :int = image_mean
lowerCAmelCase_ :Tuple = image_std
def __lowerCAmelCase ( self ) -> Optional[int]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :List[str] = LevitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :str = LevitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , """image_mean""" ) )
self.assertTrue(hasattr(__A , """image_std""" ) )
self.assertTrue(hasattr(__A , """do_normalize""" ) )
self.assertTrue(hasattr(__A , """do_resize""" ) )
self.assertTrue(hasattr(__A , """do_center_crop""" ) )
self.assertTrue(hasattr(__A , """size""" ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
lowerCAmelCase_ :Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __lowerCAmelCase ( self ) -> List[str]:
pass
def __lowerCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
lowerCAmelCase_ :Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
lowerCAmelCase_ :Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase_ :Tuple = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCAmelCase ( self ) -> Any:
# Initialize image_processing
lowerCAmelCase_ :List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ :str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
lowerCAmelCase_ :List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase_ :Optional[int] = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __lowerCAmelCase ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase_ :Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase_ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
lowerCAmelCase_ :Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
lowerCAmelCase_ :int = image_processing(__A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
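# Shape sketch for what the assertions above verify: a shortest-edge resize followed by a
# fixed center crop yields (num_channels, crop_h, crop_w) for any input resolution. The
# helper below only illustrates the resize arithmetic; it is not the LevitImageProcessor code.
def shortest_edge_size(height, width, shortest_edge):
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)

assert shortest_edge_size(30, 400, 18) == (18, 240)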
| 353 |
"""simple docstring"""
def knapsack ( weights : list , values : list , number_of_items : int , max_weight : int , index : int ) -> int:
    '''simple docstring'''
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights , values , number_of_items , max_weight , index + 1 )
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights , values , number_of_items , max_weight - weights[index] , index + 1 )
    return max(ans1 , ans2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
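# Worked usage check for the recursive knapsack above: with items (weight, value) =
# (1, 1), (3, 4), (4, 5), (5, 7) and capacity 7, the optimum takes the 3- and
# 4-weight items for a total value of 4 + 5 = 9.
assert knapsack([1, 3, 4, 5], [1, 4, 5, 7], 4, 7, 0) == 9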
| 1 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__(self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Tuple=13 , UpperCamelCase : Dict=30 , UpperCamelCase : int=2 , UpperCamelCase : Any=3 , UpperCamelCase : List[Any]=True , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]=32 , UpperCamelCase : Optional[int]=2 , UpperCamelCase : str=4 , UpperCamelCase : Any=37 , UpperCamelCase : Any="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : str=10 , UpperCamelCase : Any=0.02 , UpperCamelCase : str=3 , UpperCamelCase : Optional[int]=None , ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
def UpperCamelCase__ (self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] ):
'''simple docstring'''
lowercase__ = TFViTModel(config=UpperCamelCase )
lowercase__ = model(UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image whose size differs from the one specified in the config.
lowercase__ = self.image_size // 2
lowercase__ = pixel_values[:, :, :image_size, :image_size]
lowercase__ = model(UpperCamelCase , interpolate_pos_encoding=UpperCamelCase , training=UpperCamelCase )
lowercase__ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase__ (self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.type_sequence_label_size
lowercase__ = TFViTForImageClassification(UpperCamelCase )
lowercase__ = model(UpperCamelCase , labels=UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image whose size differs from the one specified in the config.
lowercase__ = self.image_size // 2
lowercase__ = pixel_values[:, :, :image_size, :image_size]
lowercase__ = model(UpperCamelCase , interpolate_pos_encoding=UpperCamelCase , training=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = TFViTForImageClassification(UpperCamelCase )
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ ,lowercase__ ,lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase (lowercase_ , lowercase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowerCAmelCase__ : Tuple = (
{"""feature-extraction""": TFViTModel, """image-classification""": TFViTForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase__ : Optional[int] = False
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : List[str] = False
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
lowercase__ = TFViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def UpperCamelCase__ (self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase__ (self : str ):
'''simple docstring'''
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
pass
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase , tf.keras.layers.Layer ) )
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
lowercase__ ,lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(UpperCamelCase )
lowercase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def UpperCamelCase__ (self : List[str] ):
'''simple docstring'''
lowercase__ = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(UpperCamelCase )
def _SCREAMING_SNAKE_CASE () -> List[Any]:
"""simple docstring"""
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=UpperCamelCase , return_tensors='''tf''' )
# forward pass
lowercase__ = model(**UpperCamelCase )
# verify the logits
lowercase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCamelCase )
lowercase__ = tf.constant([-0.27_44, 0.82_15, -0.08_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , UpperCamelCase , atol=1E-4 )
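# Follow-up sketch for the integration test above: decoding verified logits into a
# human-readable label. Treating `id2label` as a dict of class names is an assumption
# about the checkpoint config; for the COCO cats fixture this checkpoint is commonly
# reported to predict "Egyptian cat".
def top1_label(logits, id2label):
    return id2label[int(tf.math.argmax(logits, axis=-1)[0])]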
| 2 |
from sklearn.metrics import f1_score
import datasets
__lowerCamelCase : List[Any] = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
__lowerCamelCase : List[Any] = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
__lowerCamelCase : str = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
def __UpperCamelCase( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("int32" ) ),
"references": datasets.Sequence(datasets.Value("int32" ) ),
}
if self.config_name == "multilabel"
else {
"predictions": datasets.Value("int32" ),
"references": datasets.Value("int32" ),
} ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )
def __UpperCamelCase( self , A_ , A_ , A_=None , A_=1 , A_="binary" , A_=None ):
'''simple docstring'''
        UpperCamelCase : List[str] = f1_score(
A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ )
return {"f1": float(A_ ) if score.size == 1 else score}
| 52 | 0 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix ( matrix : list[list[float]] ) -> list[list[float]]:
    '''simple docstring'''
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""" )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError("""This matrix has no inverse.""" )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""" )
| 358 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_ ( A__ : List[Any] , A__ : str=False ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
lowerCAmelCase_ : str = """segformer.encoder.""" + key
if key.startswith("""backbone""" ):
lowerCAmelCase_ : str = key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ : List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase_ : List[Any] = key.replace(f'patch_embed{idx}' , f'patch_embeddings.{int(A__ )-1}' )
if "norm" in key:
lowerCAmelCase_ : Any = key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ : Tuple = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
lowerCAmelCase_ : int = key.replace(f'layer_norm{idx}' , f'layer_norm.{int(A__ )-1}' )
if "layer_norm1" in key:
lowerCAmelCase_ : str = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase_ : Union[str, Any] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ : Any = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase_ : str = key.replace(f'block{idx}' , f'block.{int(A__ )-1}' )
if "attn.q" in key:
lowerCAmelCase_ : List[Any] = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase_ : Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase_ : str = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase_ : Optional[Any] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase_ : Optional[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase_ : List[Any] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase_ : Optional[Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase_ : Any = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ : str = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase_ : Dict = key.replace(f'linear_c{idx}' , f'linear_c.{int(A__ )-1}' )
if key.startswith("""head""" ):
lowerCAmelCase_ : int = key.replace("""head""" , """classifier""" )
lowerCAmelCase_ : int = value
return new_state_dict
def UpperCamelCase_ ( A__ : int , A__ : Union[str, Any] ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ : int = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
lowerCAmelCase_ : Optional[int] = state_dict.pop(f'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ : List[str] = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ : Optional[Any] = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ : Union[str, Any] = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ : str = kv_bias[
config.hidden_sizes[i] :
]
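# Minimal sketch of the split performed by the function above: a fused (2*hidden, hidden)
# key/value projection is cut row-wise into separate key and value matrices.
import numpy as np

hidden = 4
kv_weight = np.arange(2 * hidden * hidden).reshape(2 * hidden, hidden)
k_weight, v_weight = kv_weight[:hidden, :], kv_weight[hidden:, :]
assert k_weight.shape == v_weight.shape == (hidden, hidden)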
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ : Optional[Any] = Image.open(requests.get(A__ , stream=A__ ).raw )
return image
@torch.no_grad()
def UpperCamelCase_ ( A__ : Optional[Any] , A__ : List[Any] , A__ : Tuple ):
'''simple docstring'''
lowerCAmelCase_ : str = SegformerConfig()
lowerCAmelCase_ : Optional[Any] = False
# set attributes based on model_name
lowerCAmelCase_ : int = """huggingface/label-files"""
if "segformer" in model_name:
lowerCAmelCase_ : Optional[int] = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
lowerCAmelCase_ : List[Any] = 1_50
lowerCAmelCase_ : int = """ade20k-id2label.json"""
lowerCAmelCase_ : Tuple = (1, 1_50, 1_28, 1_28)
elif "city" in model_name:
lowerCAmelCase_ : List[str] = 19
lowerCAmelCase_ : Dict = """cityscapes-id2label.json"""
lowerCAmelCase_ : List[str] = (1, 19, 1_28, 1_28)
else:
raise ValueError(f'Model {model_name} not supported' )
elif "mit" in model_name:
lowerCAmelCase_ : Dict = True
lowerCAmelCase_ : Optional[int] = model_name[4:6]
lowerCAmelCase_ : Union[str, Any] = 10_00
lowerCAmelCase_ : int = """imagenet-1k-id2label.json"""
lowerCAmelCase_ : Optional[Any] = (1, 10_00)
else:
raise ValueError(f'Model {model_name} not supported' )
# set config attributes
lowerCAmelCase_ : Optional[Any] = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase_ : List[Any] = {int(A__ ): v for k, v in idalabel.items()}
lowerCAmelCase_ : List[str] = idalabel
lowerCAmelCase_ : List[Any] = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowerCAmelCase_ : Any = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : int = 2_56
elif size == "b2":
lowerCAmelCase_ : Any = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : List[str] = 7_68
lowerCAmelCase_ : Any = [3, 4, 6, 3]
elif size == "b3":
lowerCAmelCase_ : List[str] = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : Union[str, Any] = 7_68
lowerCAmelCase_ : Union[str, Any] = [3, 4, 18, 3]
elif size == "b4":
lowerCAmelCase_ : Tuple = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : Tuple = 7_68
lowerCAmelCase_ : Tuple = [3, 8, 27, 3]
elif size == "b5":
lowerCAmelCase_ : Union[str, Any] = [64, 1_28, 3_20, 5_12]
lowerCAmelCase_ : str = 7_68
lowerCAmelCase_ : Any = [3, 6, 40, 3]
else:
raise ValueError(f'Size {size} not supported' )
# load image processor (only resize + normalize)
lowerCAmelCase_ : List[Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) , keep_ratio=A__ , align=A__ , do_random_crop=A__ )
# prepare image
lowerCAmelCase_ : Optional[Any] = prepare_img()
lowerCAmelCase_ : Union[str, Any] = image_processor(images=A__ , return_tensors="""pt""" ).pixel_values
logger.info(f'Converting model {model_name}...' )
# load original state dict
if encoder_only:
lowerCAmelCase_ : str = torch.load(A__ , map_location=torch.device("""cpu""" ) )
else:
lowerCAmelCase_ : List[str] = torch.load(A__ , map_location=torch.device("""cpu""" ) )["""state_dict"""]
# rename keys
lowerCAmelCase_ : Dict = rename_keys(A__ , encoder_only=A__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(A__ , A__ )
# create HuggingFace model and load state dict
if encoder_only:
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : List[Any] = SegformerForImageClassification(A__ )
else:
lowerCAmelCase_ : str = SegformerForSemanticSegmentation(A__ )
model.load_state_dict(A__ )
model.eval()
# forward pass
lowerCAmelCase_ : Tuple = model(A__ )
lowerCAmelCase_ : Union[str, Any] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowerCAmelCase_ : Tuple = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowerCAmelCase_ : List[str] = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowerCAmelCase_ : List[str] = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowerCAmelCase_ : Dict = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowerCAmelCase_ : Dict = torch.tensor(
[
[
[-1.13_72E01, -1.27_87E01, -1.34_77E01],
[-1.25_36E01, -1.41_94E01, -1.44_09E01],
[-1.32_17E01, -1.48_88E01, -1.53_27E01],
],
[
[-1.47_91E01, -1.71_22E01, -1.82_77E01],
[-1.71_63E01, -1.91_92E01, -1.95_33E01],
[-1.78_97E01, -1.99_91E01, -2.03_15E01],
],
[
[7.67_23E-01, 4.19_21E-01, -7.78_78E-02],
[4.77_72E-01, 9.55_57E-03, -2.80_82E-01],
[3.60_32E-01, -2.48_26E-01, -5.11_68E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowerCAmelCase_ : str = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowerCAmelCase_ : Optional[int] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowerCAmelCase_ : int = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowerCAmelCase_ : Union[str, Any] = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowerCAmelCase_ : List[Any] = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
lowerCAmelCase_ : Optional[Any] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , A__ , atol=1E-2 )
# finally, save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
image_processor.save_pretrained(A__ )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__A : Tuple = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 89 | 0 |
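
# The heart of the conversion above is splitting SegFormer's fused key/value projection into the
# separate `key` and `value` matrices the HuggingFace module expects. A minimal standalone sketch
# of that split, assuming a fused weight of shape (2 * hidden_size, hidden_size) with keys first:
import torch

hidden_size = 64
kv_weight = torch.randn(2 * hidden_size, hidden_size)
kv_bias = torch.randn(2 * hidden_size)

key_weight, value_weight = kv_weight[:hidden_size, :], kv_weight[hidden_size:, :]
key_bias, value_bias = kv_bias[:hidden_size], kv_bias[hidden_size:]

# each half of the fused projection equals the corresponding standalone projection
x = torch.randn(1, hidden_size)
fused = x @ kv_weight.T + kv_bias
assert torch.allclose(fused[:, :hidden_size], x @ key_weight.T + key_bias)
assert torch.allclose(fused[:, hidden_size:], x @ value_weight.T + value_bias)
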
def solution():
    '''simple docstring'''
    constant = []
    i = 1
    while len(constant) < 1E6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 283 |
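
# A quick sanity check of the digit indexing above on a short prefix of the Champernowne
# constant 0.123456789101112...; d_12 = 1 is the worked example from the problem statement:
digits = "".join(str(i) for i in range(1, 200))
assert digits[9] == "1"    # d_10: the first digit of 10
assert digits[11] == "1"   # d_12: the first digit of 11
assert digits[99] == "5"   # d_100: the first digit of 55
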
def solution(n=4000000):
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 283 | 1 |
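
# Even Fibonacci numbers obey their own recurrence E(k) = 4 * E(k-1) + E(k-2), so the sum can be
# computed without generating the odd terms -- a compact alternative to the list-based version above:
def even_fib_sum(limit: int = 4000000) -> int:
    a, b, total = 2, 8, 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a
    return total

assert even_fib_sum() == 4613732  # matches solution() above
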
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class __A( unittest.TestCase ):
def __init__( self , _snake_case , _snake_case=7 , _snake_case=3 , _snake_case=18 , _snake_case=30 , _snake_case=400 , _snake_case=True , _snake_case=32 , _snake_case=True , ) -> List[str]:
'''simple docstring'''
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size_divisor
__a = do_rescale
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class __A( a , unittest.TestCase ):
snake_case_ = GLPNImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__a = GLPNImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(_snake_case , '''size_divisor''' ) )
self.assertTrue(hasattr(_snake_case , '''resample''' ) )
self.assertTrue(hasattr(_snake_case , '''do_rescale''' ) )
def SCREAMING_SNAKE_CASE_ ( self ) -> str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[int]:
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[Any]:
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_snake_case , torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
__a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
 | 33 |
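
# GLPN preprocessing resizes each image so both spatial dimensions are multiples of
# `size_divisor` (the divisibility the shape assertions above check). A sketch of the rounding
# rule, assuming floor rounding as the tests imply:
def round_to_divisor(height: int, width: int, size_divisor: int = 32) -> tuple:
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor

assert round_to_divisor(400, 640) == (384, 640)
assert all(dim % 32 == 0 for dim in round_to_divisor(417, 501))
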
from string import ascii_uppercase
dict_char_to_int = {char: i for i, char in enumerate(ascii_uppercase)}
dict_int_to_char = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    cipher = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher += " "
        else:
            x = (dict_char_to_int[letter] - dict_char_to_int[key_new[i]]) % 26
            i += 1
            cipher += dict_int_to_char[x]
    return cipher


def original_text(cipher: str, key_new: str) -> str:
    or_txt = ""
    i = 0
    for letter in cipher:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict_char_to_int[letter] + dict_char_to_int[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict_int_to_char[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
 | 33 | 1 |
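
# The scheme above is the variant-Beaufort form of the Vigenere cipher: encryption subtracts the
# key letter, decryption adds it back, and spaces neither consume a key letter nor get shifted.
# A compact round-trip sketch of the same rule:
def shift(text: str, key: str, sign: int) -> str:
    out, i = "", 0
    for ch in text:
        if ch == " ":
            out += " "
        else:
            out += chr((ord(ch) - 65 + sign * (ord(key[i % len(key)]) - 65)) % 26 + 65)
            i += 1
    return out

msg = "THE GERMAN ATTACK"
assert shift(shift(msg, "SECRET", -1), "SECRET", +1) == msg
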
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
a_ : List[str] = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
a_ : int = parser.parse_args()
if args.model_type == "bert":
a_ : List[str] = BertForMaskedLM.from_pretrained(args.model_name)
a_ : int = 'bert'
else:
raise ValueError('args.model_type should be \"bert\".')
a_ : Union[str, Any] = model.state_dict()
a_ : Optional[int] = {}
for w in ["word_embeddings", "position_embeddings"]:
a_ : List[Any] = state_dict[f"""{prefix}.embeddings.{w}.weight"""]
for w in ["weight", "bias"]:
a_ : str = state_dict[f"""{prefix}.embeddings.LayerNorm.{w}"""]
a_ : Optional[Any] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
a_ : Tuple = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"""
]
a_ : Optional[Any] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"""
]
a_ : Tuple = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"""
]
a_ : Optional[int] = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"""
]
a_ : Tuple = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"""
]
a_ : Tuple = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"""
]
a_ : Any = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"""
]
a_ : int = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"""
]
std_idx += 1
a_ : Optional[Any] = state_dict['cls.predictions.decoder.weight']
a_ : List[Any] = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
a_ : str = state_dict[f"""cls.predictions.transform.dense.{w}"""]
a_ : Union[str, Any] = state_dict[f"""cls.predictions.transform.LayerNorm.{w}"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 137 |
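
# The extraction above seeds a 6-layer student from teacher layers [0, 2, 4, 7, 9, 11]. A generic
# sketch of that layer remapping for any prefix-addressed state dict (function and prefix names
# here are illustrative, not part of the script above):
def remap_layers(teacher_sd, layer_map, prefix="bert.encoder.layer"):
    student_sd = {}
    for student_idx, teacher_idx in enumerate(layer_map):
        old = f"{prefix}.{teacher_idx}."
        new = f"{prefix}.{student_idx}."
        for key, tensor in teacher_sd.items():
            if key.startswith(old):
                student_sd[new + key[len(old):]] = tensor
    return student_sd
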
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    '''simple docstring'''

    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    if not isinstance(s, str):
        raise TypeError("""The parameter s type must be str.""")
    if not s:
        raise ValueError("""The parameter s must not be empty.""")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    if not isinstance(bwt_string, str):
        raise TypeError("""The parameter bwt_string type must be str.""")
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or"""
            """ convertible to int.""")
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            """The parameter idx_original_string must be lower than len(bwt_string).""")
    ordered_rotations = [""""""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = """Provide a string that I will generate its BWT transform: """
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        F'''Burrows Wheeler transform for string \'{s}\' results '''
        F'''in \'{result['bwt_string']}\''''
    )
    original_string = reverse_bwt(result["""bwt_string"""], result["""idx_original_string"""])
    print(
        F'''Reversing Burrows Wheeler transform for entry \'{result['bwt_string']}\' '''
        F'''we get original string \'{original_string}\''''
    )
| 317 | 0 |
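
# A concrete trace of the transform above on "banana": the six rotations sort to
# ['abanan', 'anaban', 'ananab', 'banana', 'nabana', 'nanaba'], so the BWT string is the column
# of last characters and the index records where the original string landed:
rotations = sorted("banana"[i:] + "banana"[:i] for i in range(len("banana")))
assert "".join(word[-1] for word in rotations) == "nnbaaa"
assert rotations.index("banana") == 3
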
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 1_0_0  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
| 351 |
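
# A self-contained check of the counting quicksort above: the array must come out sorted, and
# every recursion level contributes one comparison per element scanned by the Lomuto partition:
import random

sample = random.sample(range(1000), 100)
arr = list(sample)
comparisons = _in_place_quick_sort(arr, 0, len(arr) - 1)
assert arr == sorted(sample)
assert comparisons >= len(arr) - 1  # at least one full partition pass over the data
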
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ : str = {
"configuration_nllb_moe": [
"NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
"NllbMoeConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ : Tuple = [
"NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
"NllbMoeForConditionalGeneration",
"NllbMoeModel",
"NllbMoePreTrainedModel",
"NllbMoeTop2Router",
"NllbMoeSparseMLP",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
a_ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 104 | 0 |
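
# The import-structure pattern above defers heavy imports until first attribute access. A minimal
# sketch of the same idea using module-level __getattr__ (PEP 562); this is an illustration, not
# the transformers _LazyModule implementation:
import importlib

_lazy_map = {"sqrt": "math", "dumps": "json"}

def __getattr__(name):
    if name in _lazy_map:
        return getattr(importlib.import_module(_lazy_map[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
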
from __future__ import annotations
_UpperCAmelCase : Optional[int] = tuple[int, int, int]
_UpperCAmelCase : int = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
_UpperCAmelCase : Optional[int] = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
# -------------------------- default selection --------------------------
# rotors --------------------------
_UpperCAmelCase : List[str] = """EGZWVONAHDCLFQMSIPJBYUKXTR"""
_UpperCAmelCase : Optional[int] = """FOBHMDKEXQNRAULPGSJVTYICZW"""
_UpperCAmelCase : List[Any] = """ZJXESIUQLHAVRMDOYGTNFWPBKC"""
# reflector --------------------------
_UpperCAmelCase : Union[str, Any] = {
"""A""": """N""",
"""N""": """A""",
"""B""": """O""",
"""O""": """B""",
"""C""": """P""",
"""P""": """C""",
"""D""": """Q""",
"""Q""": """D""",
"""E""": """R""",
"""R""": """E""",
"""F""": """S""",
"""S""": """F""",
"""G""": """T""",
"""T""": """G""",
"""H""": """U""",
"""U""": """H""",
"""I""": """V""",
"""V""": """I""",
"""J""": """W""",
"""W""": """J""",
"""K""": """X""",
"""X""": """K""",
"""L""": """Y""",
"""Y""": """L""",
"""M""": """Z""",
"""Z""": """M""",
}
# -------------------------- extra rotors --------------------------
_UpperCAmelCase : List[str] = """RMDJXFUWGISLHVTCQNKYPBEZOA"""
_UpperCAmelCase : Any = """SGLCPQWZHKXAREONTFBVIYJUDM"""
_UpperCAmelCase : List[str] = """HVSICLTYKQUBXDWAJZOMFGPREN"""
_UpperCAmelCase : Tuple = """RZWQHFMVDBKICJLNTUXAGYPSOE"""
_UpperCAmelCase : Optional[int] = """LFKIJODBEGAMQPXVUHYSTCZRWN"""
_UpperCAmelCase : Tuple = """KOAEGVDHXPQZMLFTYWJNBRCIUS"""
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
# Checks if there are 3 unique rotors
if (unique_rotsel := len(set(_UpperCAmelCase ) )) < 3:
lowerCamelCase__ : int = F"""Please use 3 unique rotors (not {unique_rotsel})"""
raise Exception(_UpperCAmelCase )
# Checks if rotor positions are valid
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : str = rotpos
if not 0 < rotorposa <= len(_UpperCAmelCase ):
lowerCamelCase__ : List[Any] = F"""First rotor position is not within range of 1..26 ({rotorposa}"""
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
lowerCamelCase__ : List[Any] = F"""Second rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_UpperCAmelCase )
if not 0 < rotorposa <= len(_UpperCAmelCase ):
lowerCamelCase__ : int = F"""Third rotor position is not within range of 1..26 ({rotorposa})"""
raise ValueError(_UpperCAmelCase )
# Validates string and returns dict
lowerCamelCase__ : Optional[int] = _plugboard(_UpperCAmelCase )
return rotpos, rotsel, pbdict
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> dict[str, str]:
    # checks that the plugboard setting
    # a) is of type str
    # b) has an even length (so letter pairs can be made)
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = F"""Plugboard setting isn't type string ({type(_UpperCAmelCase )})"""
raise TypeError(_UpperCAmelCase )
elif len(_UpperCAmelCase ) % 2 != 0:
lowerCamelCase__ : Dict = F"""Odd number of symbols ({len(_UpperCAmelCase )})"""
raise Exception(_UpperCAmelCase )
elif pbstring == "":
return {}
pbstring.replace(' ' , '' )
# Checks if all characters are unique
lowerCamelCase__ : Dict = set()
for i in pbstring:
if i not in abc:
lowerCamelCase__ : Union[str, Any] = F"""'{i}' not in list of symbols"""
raise Exception(_UpperCAmelCase )
elif i in tmppbl:
lowerCamelCase__ : Optional[Any] = F"""Duplicate symbol ({i})"""
raise Exception(_UpperCAmelCase )
else:
tmppbl.add(_UpperCAmelCase )
del tmppbl
# Created the dictionary
lowerCamelCase__ : Dict = {}
for j in range(0 , len(_UpperCAmelCase ) - 1 , 2 ):
lowerCamelCase__ : int = pbstring[j + 1]
lowerCamelCase__ : Union[str, Any] = pbstring[j]
return pb
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = (rotora, rotora, rotora) , _UpperCAmelCase = "" , ) -> str:
lowerCamelCase__ : List[Any] = text.upper()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = _validator(
_UpperCAmelCase , _UpperCAmelCase , plugb.upper() )
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = rotor_position
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
lowerCamelCase__ : Dict = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
lowerCamelCase__ : Tuple = plugboard[symbol]
# rotor ra --------------------------
lowerCamelCase__ : Optional[Any] = abc.index(_UpperCAmelCase ) + rotorposa
lowerCamelCase__ : int = rotora[index % len(_UpperCAmelCase )]
# rotor rb --------------------------
lowerCamelCase__ : Dict = abc.index(_UpperCAmelCase ) + rotorposa
lowerCamelCase__ : Optional[Any] = rotora[index % len(_UpperCAmelCase )]
# rotor rc --------------------------
lowerCamelCase__ : str = abc.index(_UpperCAmelCase ) + rotorposa
lowerCamelCase__ : Optional[Any] = rotora[index % len(_UpperCAmelCase )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
lowerCamelCase__ : List[Any] = reflector[symbol]
# 2nd rotors
lowerCamelCase__ : Union[str, Any] = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
lowerCamelCase__ : Tuple = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
lowerCamelCase__ : str = abc[rotora.index(_UpperCAmelCase ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
lowerCamelCase__ : Dict = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
lowerCamelCase__ : Optional[Any] = 0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
lowerCamelCase__ : Optional[Any] = 0
rotorposa += 1
if rotorposa >= len(_UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(_UpperCAmelCase )
return "".join(_UpperCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = """This is my Python script that emulates the Enigma machine from WWII."""
_UpperCAmelCase : List[Any] = (1, 1, 1)
_UpperCAmelCase : List[Any] = """pictures"""
_UpperCAmelCase : int = (rotora, rotora, rotora)
_UpperCAmelCase : Any = enigma(message, rotor_pos, rotor_sel, pb)
print("""Encrypted message:""", en)
print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
| 50 |
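
# The plugboard validator above ultimately builds a symmetric letter mapping out of a pair
# string. A minimal sketch of just that pairing step:
def plugboard_pairs(pairs: str) -> dict:
    pb = {}
    for a, b in zip(pairs[::2], pairs[1::2]):
        pb[a], pb[b] = b, a
    return pb

assert plugboard_pairs("PICTURES") == {
    "P": "I", "I": "P", "C": "T", "T": "C", "U": "R", "R": "U", "E": "S", "S": "E"
}
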
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*A_ : List[str] ,**A_ : int ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' ,A_ ,)
        super().__init__(*A_ ,**A_ )
 | 74 | 0 |
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    if nums is None or not nums:
        raise ValueError("""Input sequence should not be empty""")
    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(num, ans + num, ans)
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
_snake_case = int(input("Enter number of elements : ").strip())
_snake_case = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
print(max_subsequence_sum(array))
| 343 |
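
# "Subsequence" here is non-contiguous: at each element the recurrence
# ans = max(num, ans + num, ans) can start fresh, extend the picked set, or skip the element,
# so the result is the sum of the positive entries (or the largest element when all are negative):
assert max_subsequence_sum([1, 2, 3, -2, 5]) == 11  # picks 1 + 2 + 3 + 5
assert max_subsequence_sum([-5, -3, -8]) == -3      # all negative -> best single element
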
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCAmelCase_ ( snake_case_ ):
_A : Tuple = filter(lambda snake_case_ : p.requires_grad,model.parameters() )
_A : str = sum([np.prod(p.size() ) for p in model_parameters] )
return params
_snake_case = logging.getLogger(__name__)
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
if metric == "rouge2":
_A : Optional[int] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_A : Dict = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_A : List[str] = """{val_avg_em:.4f}-{step_count}"""
else:
raise NotImplementedError(
f'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'''
""" function.""" )
_A : Optional[int] = ModelCheckpoint(
dirpath=snake_case_,filename=snake_case_,monitor=f'''val_{metric}''',mode="""max""",save_top_k=3,every_n_epochs=1,)
return checkpoint_callback
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
return EarlyStopping(
monitor=f'''val_{metric}''',mode="""min""" if """loss""" in metric else """max""",patience=snake_case_,verbose=snake_case_,)
class lowercase ( pl.Callback ):
def a__ ( self , _a , _a ) -> Optional[Any]:
_A : List[Any] = {F'''lr_group_{i}''': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_a )
@rank_zero_only
def a__ ( self , _a , _a , _a , _a=True ) -> None:
logger.info(F'''***** {type_path} results at step {trainer.global_step:05d} *****''' )
_A : int = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_A : Dict = Path(pl_module.hparams.output_dir )
if type_path == "test":
_A : List[Any] = od / """test_results.txt"""
_A : List[Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_A : Optional[int] = od / F'''{type_path}_results/{trainer.global_step:05d}.txt'''
_A : int = od / F'''{type_path}_generations/{trainer.global_step:05d}.txt'''
results_file.parent.mkdir(exist_ok=_a )
generations_file.parent.mkdir(exist_ok=_a )
with open(_a , """a+""" ) as writer:
for key in sorted(_a ):
if key in ["log", "progress_bar", "preds"]:
continue
_A : List[Any] = metrics[key]
if isinstance(_a , torch.Tensor ):
_A : str = val.item()
_A : str = F'''{key}: {val:.6f}\n'''
writer.write(_a )
if not save_generations:
return
if "preds" in metrics:
_A : List[Any] = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_a )
@rank_zero_only
def a__ ( self , _a , _a ) -> str:
try:
_A : int = pl_module.model.model.num_parameters()
except AttributeError:
_A : str = pl_module.model.num_parameters()
_A : Optional[int] = count_trainable_parameters(_a )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def a__ ( self , _a , _a ) -> Optional[int]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_a , _a , """test""" )
@rank_zero_only
def a__ ( self , _a , _a ) -> Tuple:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 343 | 1 |
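
# The parameter counter above only sums tensors with requires_grad set; a standalone equivalent
# for any torch module:
import torch.nn as nn

def n_trainable(model: nn.Module) -> int:
    return sum(p.numel() for p in model.parameters() if p.requires_grad)

assert n_trainable(nn.Linear(10, 5)) == 10 * 5 + 5  # weight matrix + bias vector
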
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def _A ( snake_case ) -> str:
_lowercase : Optional[int] = "huggingface/label-files"
_lowercase : Any = "imagenet-1k-id2label.json"
_lowercase : Optional[Any] = json.load(open(hf_hub_download(snake_case , snake_case , repo_type="dataset" ) , "r" ) )
_lowercase : Dict = {int(snake_case ): v for k, v in idalabel.items()}
_lowercase : Any = {v: k for k, v in idalabel.items()}
_lowercase : List[str] = "std_conv" if "bit" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
_lowercase : List[str] = BitConfig(
conv_layer=snake_case , num_labels=10_00 , idalabel=snake_case , labelaid=snake_case , )
return config
def _A ( snake_case ) -> Any:
if "stem.conv" in name:
_lowercase : Optional[int] = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
_lowercase : List[Any] = name.replace("blocks" , "layers" )
if "head.fc" in name:
_lowercase : Dict = name.replace("head.fc" , "classifier.1" )
if name.startswith("norm" ):
_lowercase : Any = "bit." + name
if "bit" not in name and "classifier" not in name:
_lowercase : Optional[int] = "bit.encoder." + name
return name
def _A ( ) -> int:
_lowercase : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
_lowercase : Tuple = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
@torch.no_grad()
def _A ( snake_case , snake_case , snake_case=False ) -> Any:
_lowercase : List[str] = get_config(snake_case )
# load original model from timm
_lowercase : List[str] = create_model(snake_case , pretrained=snake_case )
timm_model.eval()
# load state_dict of original model
_lowercase : Union[str, Any] = timm_model.state_dict()
for key in state_dict.copy().keys():
_lowercase : str = state_dict.pop(snake_case )
_lowercase : List[str] = val.squeeze() if "head" in key else val
# load HuggingFace model
_lowercase : Optional[Any] = BitForImageClassification(snake_case )
model.eval()
model.load_state_dict(snake_case )
# create image processor
_lowercase : Tuple = create_transform(**resolve_data_config({} , model=snake_case ) )
_lowercase : Union[str, Any] = transform.transforms
_lowercase : Optional[int] = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
_lowercase : Optional[Any] = BitImageProcessor(
do_resize=snake_case , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
_lowercase : str = prepare_img()
_lowercase : List[str] = transform(snake_case ).unsqueeze(0 )
_lowercase : Optional[Any] = processor(snake_case , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case , snake_case )
# verify logits
with torch.no_grad():
_lowercase : Tuple = model(snake_case )
_lowercase : str = outputs.logits
print("Logits:" , logits[0, :3] )
print("Predicted class:" , model.config.idalabel[logits.argmax(-1 ).item()] )
_lowercase : Optional[Any] = timm_model(snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
processor.save_pretrained(snake_case )
if push_to_hub:
print(F'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(F'''ybelkada/{model_name}''' )
processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
_snake_case = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 250 |
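
# Conversion scripts like the one above end with a numerical equivalence check: run the original
# and converted models on identical inputs and compare outputs within a tolerance. A generic
# sketch (the .logits unwrapping is an assumption about HF-style ModelOutput objects):
import torch

def assert_equivalent(model_a, model_b, input_shape=(1, 3, 224, 224), atol=1e-3):
    x = torch.randn(input_shape)
    with torch.no_grad():
        out_a, out_b = model_a(x), model_b(x)
    logits_a = out_a.logits if hasattr(out_a, "logits") else out_a
    logits_b = out_b.logits if hasattr(out_b, "logits") else out_b
    assert logits_a.shape == logits_b.shape
    assert torch.allclose(logits_a, logits_b, atol=atol), "outputs diverge beyond tolerance"
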
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a__ ( unittest.TestCase ):
@property
def _lowerCamelCase ( self ):
"""simple docstring"""
torch.manual_seed(0 )
_lowercase : List[str] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.dummy_uncond_unet
_lowercase : Dict = KarrasVeScheduler()
_lowercase : Any = KarrasVePipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : Any = torch.manual_seed(0 )
_lowercase : List[Any] = pipe(num_inference_steps=2 , generator=_UpperCamelCase , output_type="numpy" ).images
_lowercase : Optional[Any] = torch.manual_seed(0 )
_lowercase : List[str] = pipe(num_inference_steps=2 , generator=_UpperCamelCase , output_type="numpy" , return_dict=_UpperCamelCase )[0]
_lowercase : Any = image[0, -3:, -3:, -1]
_lowercase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase : int = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class a__ ( unittest.TestCase ):
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = "google/ncsnpp-celebahq-256"
_lowercase : Any = UNetaDModel.from_pretrained(_UpperCamelCase )
_lowercase : List[Any] = KarrasVeScheduler()
_lowercase : int = KarrasVePipeline(unet=_UpperCamelCase , scheduler=_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
_lowercase : Optional[Any] = torch.manual_seed(0 )
_lowercase : Tuple = pipe(num_inference_steps=20 , generator=_UpperCamelCase , output_type="numpy" ).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase : Tuple = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 250 | 1 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class snake_case__ :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=30 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=32 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=37 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=10 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=None , lowerCAmelCase__=2 , ) -> Optional[int]:
__magic_name__ : Any = parent
__magic_name__ : Dict = batch_size
__magic_name__ : Optional[int] = image_size
__magic_name__ : Tuple = patch_size
__magic_name__ : str = num_channels
__magic_name__ : Any = is_training
__magic_name__ : List[Any] = use_labels
__magic_name__ : int = hidden_size
__magic_name__ : List[Any] = num_hidden_layers
__magic_name__ : List[str] = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Dict = hidden_act
__magic_name__ : Tuple = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : int = type_sequence_label_size
__magic_name__ : int = initializer_range
__magic_name__ : List[Any] = scope
__magic_name__ : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ : Dict = (image_size // patch_size) ** 2
__magic_name__ : Tuple = num_patches + 1
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : str = None
if self.use_labels:
__magic_name__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : str = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self ) -> List[str]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : Tuple = ViTModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Dict = model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
__magic_name__ : Optional[Any] = ViTForMaskedImageModeling(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : List[str] = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__magic_name__ : Tuple = 1
__magic_name__ : Optional[Any] = ViTForMaskedImageModeling(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : Any = model(lowerCAmelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[str]:
__magic_name__ : List[Any] = self.type_sequence_label_size
__magic_name__ : Optional[int] = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Any = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__magic_name__ : Dict = 1
__magic_name__ : Tuple = ViTForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
__magic_name__ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __magic_name__ ( self ) -> List[str]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class snake_case__ ( __lowercase , __lowercase , unittest.TestCase ):
lowercase__ : Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowercase__ : int = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
lowercase__ : Dict = True
lowercase__ : str = False
lowercase__ : Optional[int] = False
lowercase__ : Tuple = False
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : List[str] = ViTModelTester(self )
__magic_name__ : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 )
def __magic_name__ ( self ) -> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __magic_name__ ( self ) -> Any:
pass
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__magic_name__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__magic_name__ : int = model_class(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__magic_name__ : Tuple = [*signature.parameters.keys()]
__magic_name__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
__magic_name__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : str = ViTModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def UpperCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class snake_case__ ( unittest.TestCase ):
@cached_property
def __magic_name__ ( self ) -> Optional[Any]:
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Any = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(lowerCAmelCase__ )
__magic_name__ : int = self.default_image_processor
__magic_name__ : List[str] = prepare_img()
__magic_name__ : Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : str = model(**lowerCAmelCase__ )
# verify the logits
__magic_name__ : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
__magic_name__ : List[str] = torch.tensor([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : Tuple = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(lowerCAmelCase__ )
__magic_name__ : Dict = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=4_80 )
__magic_name__ : List[Any] = prepare_img()
__magic_name__ : Optional[int] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" )
__magic_name__ : str = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
__magic_name__ : List[str] = model(lowerCAmelCase__ , interpolate_pos_encoding=lowerCAmelCase__ )
# verify the logits
__magic_name__ : Optional[Any] = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
__magic_name__ : Optional[int] = torch.tensor(
[[4.2_3_4_0, 4.3_9_0_6, -6.6_6_9_2], [4.5_4_6_3, 1.8_9_2_8, -6.7_2_5_7], [4.4_4_2_9, 0.8_4_9_6, -5.8_5_8_5]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __magic_name__ ( self ) -> Tuple:
__magic_name__ : Dict = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
__magic_name__ : Union[str, Any] = self.default_image_processor
__magic_name__ : List[str] = prepare_img()
__magic_name__ : Optional[Any] = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" )
__magic_name__ : Any = inputs.pixel_values.to(lowerCAmelCase__ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__magic_name__ : List[Any] = model(lowerCAmelCase__ )
| 363 |
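
# The tester above derives the transformer sequence length as (image_size // patch_size) ** 2
# patches plus one prepended [CLS] token, which explains the shapes asserted in the integration
# tests:
num_patches = (224 // 16) ** 2
assert num_patches + 1 == 197                # vit-base-patch16-224 sequence length
assert (480 // 8) ** 2 + 1 == 3601           # dino-vits8 at 480px with interpolated positions
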
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class snake_case__ ( unittest.TestCase ):
@slow
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Any = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
__magic_name__ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
__magic_name__ : Tuple = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
__magic_name__ : List[Any] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
__magic_name__ : Any = shift_tokens_right(lowerCAmelCase__ , model.config.pad_token_id , model.config.decoder_start_token_id )
__magic_name__ : List[Any] = model(lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ ).logits
__magic_name__ : Tuple = optax.softmax_cross_entropy(lowerCAmelCase__ , onehot(lowerCAmelCase__ , logits.shape[-1] ) ).mean()
__magic_name__ : List[Any] = -(labels.shape[-1] * loss.item())
__magic_name__ : List[Any] = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 138 | 0 |
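
# `shift_tokens_right` builds decoder inputs by prepending the decoder start token and dropping
# the last label -- the standard teacher-forcing shift. A numpy sketch of that behavior (the
# -100 -> pad replacement mirrors the usual HF masking convention, stated here as an assumption):
import numpy as np

def shift_right(labels, pad_id, start_id):
    shifted = np.roll(labels, 1, axis=-1)
    shifted[..., 0] = start_id
    return np.where(shifted == -100, pad_id, shifted)

labels = np.array([[42, 7, 99, 1]])
assert (shift_right(labels, pad_id=0, start_id=0)[0] == [0, 42, 7, 99]).all()
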
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
_a = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
_a = logging.get_logger(__name__)
class A_ ( UpperCAmelCase__ ):
_lowercase : Union[str, Any] = "maskformer"
_lowercase : Dict = {"hidden_size": "mask_feature_size"}
_lowercase : Optional[Any] = ["resnet", "swin"]
_lowercase : Optional[int] = ["detr"]
def __init__( self : List[Any] , UpperCAmelCase : int = 2_5_6 , UpperCAmelCase : int = 2_5_6 , UpperCAmelCase : float = 0.1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional[Dict] = None , UpperCAmelCase : Optional[Dict] = None , UpperCAmelCase : float = 0.02 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : float = 20.0 , UpperCAmelCase : Optional[bool] = None , **UpperCAmelCase : List[Any] , ) -> Any:
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__lowerCAmelCase: Any = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: str = backbone_config.pop('model_type' )
__lowerCAmelCase: List[Any] = CONFIG_MAPPING[backbone_model_type]
__lowerCAmelCase: int = config_class.from_dict(UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__lowerCAmelCase: str = DetrConfig()
else:
# verify that the decoder is supported
__lowerCAmelCase: str = (
decoder_config.pop('model_type' ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F'''Transformer Decoder {decoder_type} not supported, please use one of'''
F''' {','.join(self.decoders_supported )}''' )
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCAmelCase: Any = CONFIG_MAPPING[decoder_type]
__lowerCAmelCase: List[Any] = config_class.from_dict(UpperCAmelCase )
__lowerCAmelCase: List[str] = backbone_config
__lowerCAmelCase: Dict = decoder_config
# main feature dimension for the model
__lowerCAmelCase: Optional[int] = fpn_feature_size
__lowerCAmelCase: int = mask_feature_size
# initializer
__lowerCAmelCase: Optional[Any] = init_std
__lowerCAmelCase: Tuple = init_xavier_std
# Hungarian matcher && loss
__lowerCAmelCase: Any = cross_entropy_weight
__lowerCAmelCase: List[str] = dice_weight
__lowerCAmelCase: Optional[int] = mask_weight
__lowerCAmelCase: Optional[Any] = use_auxiliary_loss
__lowerCAmelCase: int = no_object_weight
__lowerCAmelCase: Dict = output_auxiliary_logits
__lowerCAmelCase: Tuple = self.decoder_config.encoder_attention_heads
__lowerCAmelCase: Dict = self.decoder_config.num_hidden_layers
super().__init__(**UpperCAmelCase )
    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        return cls(
            backbone_config=backbone_config,
            decoder_config=decoder_config,
            **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
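# --- Illustrative sketch added by the editor, not part of the original file ---
# Assuming the public transformers API, the classmethod above composes a config
# from explicit backbone/decoder configs; the values below are made up:
#
#   backbone = SwinConfig(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32])
#   decoder = DetrConfig()
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(
#       backbone_config=backbone, decoder_config=decoder, mask_feature_size=256
#   )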
| 322 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| 67 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
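    # Editor's note (not part of the original test): why "lower" becomes
    # ["low", "er</w>"] above. BPE splits the word into characters with an
    # end-of-word marker ("l", "o", "w", "e", "r</w>") and greedily applies the
    # ranked merges from setUp: "l o" -> "lo", "lo w" -> "low",
    # "e r</w>" -> "er</w>". In the toy vocab, "low" is id 14, "er</w>" id 15
    # and "<unk>" id 20, which is exactly the [14, 15, 20] asserted here.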
    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1] | 367 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2) | 215 | 0 |
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check whether it is safe to place digit ``n`` at ``grid[row][column]``."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or ``None`` if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place via depth-first backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            # undo the tentative assignment and backtrack
            grid[row][column] = 0
    return None
def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 2_0)
print_solution(example_grid)
print("""\nExample grid solution:""")
    solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
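# Editor's note (not in the original script): the solver is plain depth-first
# backtracking, trying digits 1-9 in each empty cell and undoing a choice as
# soon as the recursion fails, i.e. worst-case O(9^m) for m empty cells.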
| 215 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()
    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
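# --- Editor's illustrative sketch, not part of the original test ---
# ClapProcessor simply routes each modality: `text=` goes to the Roberta
# tokenizer and `audios=` to the Clap feature extractor, e.g. (checkpoint taken
# from setUp above, audio data made up):
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text="a dog barking", audios=floats_list((1, 1_000)), return_tensors="pt")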
| 215 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_84,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_28,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
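# Editor's note (not in the original script): the three host-to-device copies,
# the kernel launch and the two device-to-host copies in model_infer are all
# enqueued on one CUDA stream, so stream.synchronize() makes infer_time cover
# the full copy-compute-copy round trip rather than the kernel alone.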
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
    max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation='only_second' if pad_on_right else 'only_first',
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding='max_length',
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans; this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]
    return tokenized_examples
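# Editor's illustration (numbers made up, not from the original script): with
# max_seq_length=384 and doc_stride=128, a ~600-token question+context pair is
# split into overlapping features whose contexts share 128 tokens, and
# overflow_to_sample_mapping records which source example each feature belongs
# to so that predictions can be regrouped per example afterwards.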
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 10_00 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 10_00))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}")
| 177 |
def pancake_sort(arr):
    """Sort ``arr`` by repeatedly flipping prefixes (pancake sort) and return it."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[0:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi, bringing the maximum to the front
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse the whole unsorted prefix, sinking the maximum into place
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
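# Editor's worked example (not in the original script):
#   pancake_sort([3, 1, 2])
#   cur=3: max of [3, 1, 2] is at index 0 -> first flip is a no-op ->
#          flipping the whole prefix of length 3 gives [2, 1, 3]
#   cur=2: max of [2, 1] is at index 0 -> first flip is a no-op ->
#          flipping the prefix of length 2 gives [1, 2, 3]
#   result: [1, 2, 3]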
| 177 | 1 |
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path
        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE (self : Optional[Any] ) -> List[str]:
'''simple docstring'''
snake_case : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "ignore_index" ) )
self.assertTrue(hasattr(snake_case__ , "class_info_file" ) )
self.assertTrue(hasattr(snake_case__ , "num_text" ) )
self.assertTrue(hasattr(snake_case__ , "repo_path" ) )
self.assertTrue(hasattr(snake_case__ , "metadata" ) )
self.assertTrue(hasattr(snake_case__ , "do_reduce_labels" ) )
def _SCREAMING_SNAKE_CASE (self : Optional[int] ) -> str:
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE (self : Dict ) -> Any:
'''simple docstring'''
snake_case : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
snake_case : Optional[int] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
snake_case , snake_case : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case , snake_case : Optional[int] = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
snake_case : str = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case : List[str] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
snake_case : Union[str, Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
snake_case , snake_case : int = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case , snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
snake_case : Optional[int] = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _SCREAMING_SNAKE_CASE (self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
snake_case : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
snake_case , snake_case : Union[str, Any] = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case , snake_case : Optional[int] = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
snake_case : List[str] = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(
        self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"
    ):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]
        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )
        return inputs
def _SCREAMING_SNAKE_CASE (self : int ) -> Dict:
'''simple docstring'''
pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )
            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]
            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensures padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)
        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
def _SCREAMING_SNAKE_CASE (self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Any = np.zeros((20, 50) )
snake_case : List[str] = 1
snake_case : List[str] = 1
snake_case : Optional[Any] = 1
snake_case : int = binary_mask_to_rle(snake_case__ )
self.assertEqual(len(snake_case__ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
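        # Editor's note (not part of the original test): binary_mask_to_rle
        # run-length encodes the flattened 20x50 mask, so the assertions above
        # pin the number of encoded values (4) and the first two entries
        # (21 and 45) produced by the cells that were set to 1.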
    def test_post_process_semantic_segmentation(self):
        feature_extractor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs)
        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )
        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = feature_extractor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)
        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)
        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            ) | 59 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return train_ds, val_ds, test_ds, label2id
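# --- Editor's illustrative call, not part of the original script (file names
# and column index are placeholders):
#
#   train_ds, val_ds, test_ds, label2id = get_tfds(
#       train_file="train.csv", eval_file="dev.csv", test_file=None,
#       tokenizer=tokenizer, label_column_id=0, max_seq_length=128,
#   )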
__lowerCamelCase = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
if __name__ == "__main__":
    main()
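# Hedged usage sketch (not part of the original script): how get_tfds would be
# called directly, outside the CLI flow. The checkpoint name and CSV paths are
# illustrative assumptions; the CSV is expected to hold a label column plus one
# or two text columns.
def _example_get_tfds():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    train_ds, val_ds, test_ds, label2id = get_tfds(
        train_file="train.csv",
        eval_file="dev.csv",
        test_file=None,
        tokenizer=tokenizer,
        label_column_id=0,
        max_seq_length=128,
    )
    return train_ds, label2id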
| 59 | 1 |
"""simple docstring"""
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 350 | """simple docstring"""
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [a, c, d] -- size, number and step of the convolution kernel
        :param size_p1: pooling size
        :param bp_num1: unit number of the flatten layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        # save all model parameters as a pickled dict
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        # restore a model previously written by save_model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        r_w = model_dic.get("rate_weight")
        r_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, r_w, r_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        # sigmoid activation
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slices to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        # expand a list of matrices into one flat list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expand a single matrix into a one-row array
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        # propagate the pooling-layer gradient back to the convolution maps
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=bool):
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)
                # --------------Model Leaning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print(("   - - Training epoch: ", rp, f"   - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print(("   - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the image data after the convolution/pooling stage so it can be inspected
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
    pass
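    # Hypothetical smoke test (not in the original): train the CNN above on
    # random data. Shapes follow the constructor contract -- conv1_get is
    # [kernel_size, kernel_count, step], a 12x12 input with a 3x3 kernel and
    # 2x2 pooling yields 2 maps of 5x5, so the flatten layer needs 50 units.
    cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=50, bp_num2=20, bp_num3=4)
    datas_train = [np.random.rand(12, 12) for _ in range(3)]
    datas_teach = [np.random.rand(4) for _ in range(3)]
    cnn.train(3, datas_train, datas_teach, n_repeat=2, error_accuracy=0.5, draw_e=False)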
| 321 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name

    return name


# We will verify the conversion on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )

    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
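    # Quick illustrative check of the key-renaming rules above (not in the
    # original script): timm parameter names should land in the HuggingFace
    # BiT namespace.
    assert rename_key("stem.conv.weight") == "bit.embedder.convolution.weight"
    assert rename_key("head.fc.weight") == "classifier.1.weight"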
| 56 |
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of the snowflake
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
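    # Worked check (a sketch, not in the original): each iteration replaces
    # every segment with 4 shorter ones, so n iterations of the 3-segment
    # triangle yield 3 * 4**n segments, i.e. 3 * 4**n + 1 polyline points.
    assert len(iterate(INITIAL_VECTORS, 3)) == 3 * 4**3 + 1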
| 23 | 0 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])

CUSTOM_DPR_READER_DOCSTRING = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
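# Toy illustration (not part of the original module): _get_best_spans only
# inspects its arguments, so the span-scoring rule can be exercised without
# instantiating a tokenizer. A span's score is start_logit + end_logit, and
# spans overlapping a better one are discarded.
def _demo_best_spans():
    spans = CustomDPRReaderTokenizerMixin._get_best_spans(
        None,
        start_logits=[0.1, 2.0, 0.3],
        end_logits=[0.2, 1.0, 3.0],
        max_answer_length=2,
        top_spans=1,
    )
    return spans  # [(1, 2)]: best span starts at token 1 and ends at token 2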
| 370 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
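# Usage sketch (assuming the class above mirrors transformers' FalconConfig):
# head_dim is derived from the hidden size, and num_kv_heads falls back to
# num_attention_heads when not given.
def _demo_falcon_config():
    config = FalconConfig(hidden_size=4544, num_attention_heads=71)
    assert config.head_dim == 64  # 4544 // 71
    assert config.num_kv_heads == 71
    return config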
| 141 | 0 |
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
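    # Quick self-checks (a sketch, not in the original): duplicates keep their
    # relative order thanks to the `low = mid + 1` branch on equality, and the
    # empty list is a no-op.
    assert binary_insertion_sort([5, 2, 4, 2]) == [2, 2, 4, 5]
    assert binary_insertion_sort([]) == []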
| 284 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'

_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'

_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMath(datasets.Metric):
    """Accuracy metric for the MATH dataset, computed after canonicalizing inputs."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 284 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
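# Hedged usage sketch (not part of the original file): the pipeline factory is
# the usual way to reach this class. The checkpoint name is an illustrative
# assumption.
def _demo_image_to_text():
    from transformers import pipeline

    captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
    return captioner("http://images.cocodataset.org/val2017/000000039769.jpg")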
| 258 | import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for the Spark dataset builder."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
class SparkExamplesIterable(_BaseExamplesIterable):
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    @property
    def n_shards(self) -> int:
        return len(self.partition_order)
class Spark(datasets.DatasetBuilder):
    BUILDER_CONFIG_CLASS = SparkConfig

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will not
            # change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ):
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)

            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
            # pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
def __A ( self : int , UpperCamelCase__ : "datasets.SplitGenerator" , ):
'''simple docstring'''
return SparkExamplesIterable(self.df )
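# Usage sketch (illustrative): the public entry point that exercises the shard-writing
# and renaming methods above is `Dataset.from_spark`; exact behavior depends on the
# installed `datasets` version and an active SparkSession.
# from datasets import Dataset
# df = spark.createDataFrame([("hello",), ("world",)], "text: string")
# ds = Dataset.from_spark(df)  # materializes the DataFrame into Arrow/Parquet shards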
| 258 | 1 |
"""simple docstring"""
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}
class BertAbsConfig(PretrainedConfig):
    """Configuration for the BertAbs abstractive summarization model."""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.max_pos = max_pos

        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout

        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
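# Usage sketch: unknown keyword arguments fall through to PretrainedConfig, so e.g.
# BertAbsConfig(enc_layers=4, dec_layers=4) yields a smaller summarizer configuration.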
| 347 |
'''simple docstring'''
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert rows of raw data into one list per attribute (column)."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalise every column; weight 0 inverts the score, weight 1 keeps it."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-attribute scores for every row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Weighted scoring: appends a combined score to every row of source_data."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
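# Worked example (hypothetical vehicle rows [price, mileage, registration year];
# weight 0 means "lower is better", weight 1 means "higher is better"):
# vehicles = [[20.0, 60.0, 2012.0], [23.0, 90.0, 2015.0], [22.0, 50.0, 2011.0]]
# procentual_proximity(vehicles, [0, 0, 1])
# Each row comes back with one extra element: its combined min-max-normalised score.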
| 120 | 0 |
def solution(min_total: int = 10**12) -> int:
    """
    Walk the integer solutions of the underlying Pell-like recurrence until the total
    number of discs, (numerator + 1) // 2, first exceeds min_total, then return the
    corresponding count of blue discs, (denominator + 1) // 2.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(f"""{solution() = }""")
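# Worked example at a small scale: with min_total=20 the loop stops at the arrangement
# of 21 discs with 15 blue, since (15/21) * (14/20) == 1/2, so solution(20) == 15.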
| 355 |
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop above always returns


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    """Level order that prints each level of the tree on its own line."""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width: int = 50, char: str = "*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
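# Non-interactive usage sketch (bypasses the input()-driven build_tree above):
# root = TreeNode(1)
# root.left, root.right = TreeNode(2), TreeNode(3)
# pre_order(root)   # prints 1,2,3,
# in_order(root)    # prints 2,1,3,
# post_order(root)  # prints 2,3,1,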
| 99 | 0 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
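# Example of what the checkpoint regex extracts from a config docstring (illustrative):
# >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]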
| 22 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/blenderbot_small-90M""": 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast tokenizer for BlenderbotSmall, backed by a byte-level BPE."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids are all zeros, as BlenderbotSmall does not use them."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
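# Usage sketch (requires real vocab.json / merges.txt files on disk):
# tok = BlenderbotSmallTokenizerFast(vocab_file="vocab.json", merges_file="merges.txt")
# tok.build_inputs_with_special_tokens([5, 6])  # -> [bos_token_id, 5, 6, eos_token_id]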
| 4 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
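# Note: with the lazy module registered in sys.modules, `import transformers.models.swin`
# stays cheap; heavy torch/TF symbols such as SwinModel are only resolved by _LazyModule
# on first attribute access.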
| 288 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate the quantum Fourier transform circuit on `number_of_qubits` qubits."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
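# Sanity check: applied to the default all-zeros input state, the QFT produces a uniform
# superposition, so the 10_000-shot histogram is roughly flat over '000'..'111'
# (about 1_250 counts per bitstring for 3 qubits).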
| 288 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
MARIAN_MODEL = "sshleifer/mar_enro_6_3_student"
class TestMbartCc25Enro(TestCasePlus):
    def setUp(self):
        super().setUp()

        data_cached = cached_path(
            "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz",
            extract_compressed_file=True,
        )
        self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k"
    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm up the cache so later tests don't include download time."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)
@slow
@require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
__SCREAMING_SNAKE_CASE : Any = {
'''$MAX_LEN''': 64,
'''$BS''': 64,
'''$GAS''': 1,
'''$ENRO_DIR''': self.data_dir,
'''facebook/mbart-large-cc25''': MARIAN_MODEL,
# "val_check_interval=0.25": "val_check_interval=1.0",
'''--learning_rate=3e-5''': '''--learning_rate 3e-4''',
'''--num_train_epochs 6''': '''--num_train_epochs 1''',
}
# Clean up bash script
__SCREAMING_SNAKE_CASE : List[Any] = (self.test_file_dir / '''train_mbart_cc25_enro.sh''').open().read().split('''finetune.py''' )[1].strip()
__SCREAMING_SNAKE_CASE : Any = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
for k, v in env_vars_to_replace.items():
__SCREAMING_SNAKE_CASE : List[Any] = bash_script.replace(lowerCAmelCase__ , str(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Any = self.get_auto_remove_tmp_dir()
# bash_script = bash_script.replace("--fp16 ", "")
__SCREAMING_SNAKE_CASE : Dict = f'''
--output_dir {output_dir}
--tokenizer_name Helsinki-NLP/opus-mt-en-ro
--sortish_sampler
--do_predict
--gpus 1
--freeze_encoder
--n_train 40000
--n_val 500
--n_test 500
--fp16_opt_level O1
--num_sanity_val_steps 0
--eval_beams 2
'''.split()
# XXX: args.gpus > 1 : handle multi_gpu in the future
__SCREAMING_SNAKE_CASE : Optional[int] = ['''finetune.py'''] + bash_script.split() + args
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE : Any = pl.Trainer.add_argparse_args(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = SummarizationModule.add_model_specific_args(lowerCAmelCase__ , os.getcwd() )
__SCREAMING_SNAKE_CASE : int = parser.parse_args()
__SCREAMING_SNAKE_CASE : str = main(lowerCAmelCase__ )
# Check metrics
__SCREAMING_SNAKE_CASE : List[str] = load_json(model.metrics_save_path )
__SCREAMING_SNAKE_CASE : int = metrics['''val'''][0]
__SCREAMING_SNAKE_CASE : Dict = metrics['''val'''][-1]
self.assertEqual(len(metrics['''val'''] ) , (args.max_epochs / args.val_check_interval) )
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
self.assertGreater(last_step_stats['''val_avg_gen_time'''] , 0.01 )
# model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
self.assertLessEqual(last_step_stats['''val_avg_gen_time'''] , 1.0 )
# test learning requirements:
# 1. BLEU improves over the course of training by more than 2 pts
self.assertGreater(last_step_stats['''val_avg_bleu'''] - first_step_stats['''val_avg_bleu'''] , 2 )
# 2. BLEU finishes above 17
self.assertGreater(last_step_stats['''val_avg_bleu'''] , 17 )
# 3. test BLEU and val BLEU within ~1.1 pt.
self.assertLess(abs(metrics['''val'''][-1]['''val_avg_bleu'''] - metrics['''test'''][-1]['''test_avg_bleu'''] ) , 1.1 )
# check lightning ckpt can be loaded and has a reasonable statedict
__SCREAMING_SNAKE_CASE : Dict = os.listdir(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = [x for x in contents if x.endswith('''.ckpt''' )][0]
__SCREAMING_SNAKE_CASE : int = os.path.join(args.output_dir , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__SCREAMING_SNAKE_CASE : Dict = {os.path.basename(lowerCAmelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
class TestDistilMarianNoTeacher(TestCasePlus):
@timeout_decorator.timeout(600 )
@slow
@require_torch_gpu
    def test_opus_mt_distill_script(self):
__SCREAMING_SNAKE_CASE : str = f'''{self.test_file_dir_str}/test_data/wmt_en_ro'''
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'''--fp16_opt_level=O1''': '''''',
'''$MAX_LEN''': 128,
'''$BS''': 16,
'''$GAS''': 1,
'''$ENRO_DIR''': data_dir,
'''$m''': '''sshleifer/student_marian_en_ro_6_1''',
'''val_check_interval=0.25''': '''val_check_interval=1.0''',
}
# Clean up bash script
__SCREAMING_SNAKE_CASE : int = (
(self.test_file_dir / '''distil_marian_no_teacher.sh''').open().read().split('''distillation.py''' )[1].strip()
)
__SCREAMING_SNAKE_CASE : Optional[int] = bash_script.replace('''\\\n''' , '''''' ).strip().replace('''"$@"''' , '''''' )
__SCREAMING_SNAKE_CASE : Tuple = bash_script.replace('''--fp16 ''' , ''' ''' )
for k, v in env_vars_to_replace.items():
__SCREAMING_SNAKE_CASE : Any = bash_script.replace(lowerCAmelCase__ , str(lowerCAmelCase__ ) )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_auto_remove_tmp_dir()
__SCREAMING_SNAKE_CASE : Any = bash_script.replace('''--fp16''' , '''''' )
__SCREAMING_SNAKE_CASE : Tuple = 6
__SCREAMING_SNAKE_CASE : str = (
['''distillation.py''']
+ bash_script.split()
+ [
f'''--output_dir={output_dir}''',
'''--gpus=1''',
'''--learning_rate=1e-3''',
f'''--num_train_epochs={epochs}''',
'''--warmup_steps=10''',
'''--val_check_interval=1.0''',
'''--do_predict''',
]
)
with patch.object(lowerCAmelCase__ , '''argv''' , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser()
__SCREAMING_SNAKE_CASE : str = pl.Trainer.add_argparse_args(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Tuple = SummarizationDistiller.add_model_specific_args(lowerCAmelCase__ , os.getcwd() )
__SCREAMING_SNAKE_CASE : str = parser.parse_args()
# assert args.gpus == gpus THIS BREAKS for multi_gpu
__SCREAMING_SNAKE_CASE : Any = distill_main(lowerCAmelCase__ )
# Check metrics
__SCREAMING_SNAKE_CASE : List[Any] = load_json(model.metrics_save_path )
__SCREAMING_SNAKE_CASE : List[str] = metrics['''val'''][0]
__SCREAMING_SNAKE_CASE : List[str] = metrics['''val'''][-1]
assert len(metrics['''val'''] ) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check
assert last_step_stats["val_avg_gen_time"] >= 0.01
assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing
assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float)
# check lightning ckpt can be loaded and has a reasonable statedict
__SCREAMING_SNAKE_CASE : List[str] = os.listdir(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = [x for x in contents if x.endswith('''.ckpt''' )][0]
__SCREAMING_SNAKE_CASE : List[str] = os.path.join(args.output_dir , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
__SCREAMING_SNAKE_CASE : int = '''model.model.decoder.layers.0.encoder_attn_layer_norm.weight'''
assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
# TODO: turn on args.do_predict when PL bug fixed.
if args.do_predict:
__SCREAMING_SNAKE_CASE : Any = {os.path.basename(lowerCAmelCase__ ) for p in contents}
assert "test_generations.txt" in contents
assert "test_results.txt" in contents
# assert len(metrics["val"]) == desired_n_evals
assert len(metrics['''test'''] ) == 1
| 9 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
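# Note: the scraper assumes worldometers keeps exactly three "maincounter-number" divs,
# in cases/deaths/recovered order; the namedtuple unpacks the xpath results positionally.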
| 159 | 0 |
"""simple docstring"""
def topological_sort(graph):
    """Kahn's algorithm: repeatedly pop zero-indegree vertices to build a topological order."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
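# For the DAG above, Kahn's algorithm prints one valid order: [0, 1, 2, 3, 4, 5].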
| 77 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_DESCRIPTION = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_KWARGS_DESCRIPTION = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    'bleu': bleu score,\n    'precisions': geometric mean of n-gram precisions,\n    'brevity_penalty': brevity penalty,\n    'length_ratio': ratio of lengths,\n    'translation_length': translation_length,\n    'reference_length': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],  # tokenized prediction of the first sample\n    ...     [\"foo\", \"bar\", \"foobar\"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)\n    ...     [[\"foo\", \"bar\", \"foobar\"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric(\"bleu\")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results[\"bleu\"])\n    1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
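# Usage sketch, mirroring the examples in _KWARGS_DESCRIPTION above:
# bleu = datasets.load_metric("bleu")
# results = bleu.compute(predictions=[["hello", "there"]], references=[[["hello", "there"]]])
# results["bleu"]  # 1.0 for an exact match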
| 77 | 1 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_relpos_bias_lookup(params, i, prefix):
    """Returns the Relative Position Bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]
# Encoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
__UpperCamelCase =tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , 'pre_attention_layer_norm' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , 'attention' )
__UpperCamelCase =layer_norm
__UpperCamelCase =k.T
__UpperCamelCase =o.T
__UpperCamelCase =q.T
__UpperCamelCase =v.T
# Block i, layer 1 (MLP).
__UpperCamelCase =tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , 'pre_mlp_layer_norm' )
__UpperCamelCase , __UpperCamelCase =tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =layer_norm
if split_mlp_wi:
__UpperCamelCase =wi[0].T
__UpperCamelCase =wi[1].T
else:
__UpperCamelCase =wi.T
__UpperCamelCase =wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCamelCase =tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'encoder' ).T
__UpperCamelCase =old['encoder/encoder_norm/scale']
if not scalable_attention:
__UpperCamelCase =tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , 'encoder' ).T
__UpperCamelCase =tax_relpos_bias_lookup(
SCREAMING_SNAKE_CASE__ , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(SCREAMING_SNAKE_CASE__ ):
# Block i, layer 0 (Self Attention).
__UpperCamelCase =tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'pre_self_attention_layer_norm' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'self_attention' )
__UpperCamelCase =layer_norm
__UpperCamelCase =k.T
__UpperCamelCase =o.T
__UpperCamelCase =q.T
__UpperCamelCase =v.T
# Block i, layer 1 (Cross Attention).
__UpperCamelCase =tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'pre_cross_attention_layer_norm' )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =tax_attention_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'encoder_decoder_attention' )
__UpperCamelCase =layer_norm
__UpperCamelCase =k.T
__UpperCamelCase =o.T
__UpperCamelCase =q.T
__UpperCamelCase =v.T
# Block i, layer 2 (MLP).
__UpperCamelCase =tax_layer_norm_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , 'pre_mlp_layer_norm' )
__UpperCamelCase , __UpperCamelCase =tax_mlp_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =layer_norm
if split_mlp_wi:
__UpperCamelCase =wi[0].T
__UpperCamelCase =wi[1].T
else:
__UpperCamelCase =wi.T
__UpperCamelCase =wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__UpperCamelCase =tax_relpos_bias_lookup(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'decoder' ).T
__UpperCamelCase =old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__UpperCamelCase =old['decoder/logits_dense/kernel'].T
return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
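# Example invocation (script name and paths are placeholders):
# python convert_checkpoint.py \
#   --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#   --config_file /path/to/config.json \
#   --pytorch_dump_path ./converted \
#   --scalable_attention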
| 62 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_sew'] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 62 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class PegasusConfig(PretrainedConfig):
    """Configuration class for PEGASUS models."""

    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50_265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
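# The attribute_map plus the two properties above let generic code read
# config.num_attention_heads and config.hidden_size even though PEGASUS stores them
# as encoder_attention_heads and d_model.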
| 149 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , ):
"""simple docstring"""
snake_case = BioGptForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# create attention mask
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
snake_case = self.seq_length // 2
snake_case = 0
# first forward pass
snake_case ,snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase ).to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
snake_case = ids_tensor((1,) , lowerCAmelCase ).item() + 1
snake_case = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
snake_case = random_other_next_tokens
# append to next input_ids and attn_mask
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=lowerCAmelCase )] , dim=1 , )
# get two different outputs
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , past_key_values=lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -1, random_slice_idx].detach()
snake_case = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(config=lowerCAmelCase ).to(lowerCAmelCase ).eval()
snake_case = torch.ones(input_ids.shape , dtype=torch.long , device=lowerCAmelCase )
# first forward pass
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
snake_case ,snake_case = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase )['last_hidden_state']
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[
'last_hidden_state'
]
# select random slice
snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 ) )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase , lowerCAmelCase=False ):
"""simple docstring"""
snake_case = BioGptForCausalLM(lowerCAmelCase )
model.to(lowerCAmelCase )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
snake_case = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def snake_case ( self , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = BioGptModel(lowerCAmelCase )
snake_case = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def snake_case ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , *lowerCAmelCase ):
"""simple docstring"""
snake_case = self.num_labels
snake_case = BioGptForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , token_type_ids=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": BioGptModel,
"""text-classification""": BioGptForSequenceClassification,
"""text-generation""": BioGptForCausalLM,
"""token-classification""": BioGptForTokenClassification,
"""zero-shot""": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCAmelCase : List[str] = False
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptModelTester(self )
snake_case = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case = type
self.model_tester.create_and_check_model(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*lowerCAmelCase , gradient_checkpointing=lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*lowerCAmelCase )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCAmelCase )
snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case = 'left'
# define PAD token = EOS token (BioGPT has no dedicated pad token)
snake_case = tokenizer.eos_token
snake_case = model.config.eos_token_id
# use different length sentences to test batching
snake_case = [
'Hello, my dog is a little',
'Today, I',
]
snake_case = tokenizer(lowerCAmelCase , return_tensors='pt' , padding=lowerCAmelCase )
snake_case = inputs['input_ids'].to(lowerCAmelCase )
snake_case = model.generate(
input_ids=lowerCAmelCase , attention_mask=inputs['attention_mask'].to(lowerCAmelCase ) , )
snake_case = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(lowerCAmelCase )
snake_case = model.generate(input_ids=lowerCAmelCase )
snake_case = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
snake_case = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(lowerCAmelCase )
snake_case = model.generate(input_ids=lowerCAmelCase , max_length=model.config.max_length - num_paddings )
snake_case = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
snake_case = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase )
snake_case = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase )
snake_case = [
'Hello, my dog is a little bit bigger than a little bit.',
'Today, I have a good idea of how to use the information',
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , [non_padded_sentence, padded_sentence] )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = BioGptModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase )
snake_case = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
snake_case = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case ( self ):
"""simple docstring"""
snake_case ,snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = 3
snake_case = 'multi_label_classification'
snake_case = input_dict['input_ids']
snake_case = input_ids.ne(1 ).to(lowerCAmelCase )
snake_case = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
snake_case = BioGptForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
snake_case = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
snake_case = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
snake_case = model(lowerCAmelCase )[0]
snake_case = 4_23_84
snake_case = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase )
snake_case = torch.tensor(
[[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase , atol=1E-4 ) )
@slow
def snake_case ( self ):
"""simple docstring"""
snake_case = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
snake_case = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
model.to(lowerCAmelCase )
torch.manual_seed(0 )
snake_case = tokenizer('COVID-19 is' , return_tensors='pt' ).to(lowerCAmelCase )
snake_case = model.generate(
**lowerCAmelCase , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=lowerCAmelCase , )
snake_case = tokenizer.decode(output_ids[0] , skip_special_tokens=lowerCAmelCase )
snake_case = (
'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
' more than 800,000 deaths.'
)
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
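The batching test above only works because the tokenizer pads on the left, so generation always continues from the last real token of each prompt. A minimal sketch of that setup, assuming the public transformers API and the same "microsoft/biogpt" checkpoint used in the test:
# Hedged sketch: left-padded batched generation with a causal LM.
import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

# Causal LMs must be padded on the left so the newest token is always last;
# BioGPT has no pad token, so the EOS token is reused for padding.
tokenizer.padding_side = "left"
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = model.config.eos_token_id

batch = tokenizer(
    ["Hello, my dog is a little", "Today, I"],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    out = model.generate(
        input_ids=batch["input_ids"], attention_mask=batch["attention_mask"]
    )
print(tokenizer.batch_decode(out, skip_special_tokens=True))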
| 149 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
@property
def a ( self : Any ) -> int:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def a ( self : Dict ) -> int:
__lowerCAmelCase = self.dummy_uncond_unet
__lowerCAmelCase = PNDMScheduler()
__lowerCAmelCase = PNDMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pndm.to(SCREAMING_SNAKE_CASE__ )
pndm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pndm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , output_type="""numpy""" ).images
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pndm(generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=20 , output_type="""numpy""" , return_dict=SCREAMING_SNAKE_CASE__ )[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def a ( self : Any ) -> Any:
__lowerCAmelCase = """google/ddpm-cifar10-32"""
__lowerCAmelCase = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = PNDMScheduler()
__lowerCAmelCase = PNDMPipeline(unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ )
pndm.to(SCREAMING_SNAKE_CASE__ )
pndm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pndm(generator=SCREAMING_SNAKE_CASE__ , output_type="""numpy""" ).images
__lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCAmelCase = np.array([0.1_5_6_4, 0.1_4_6_4_5, 0.1_4_0_6, 0.1_4_7_1_5, 0.1_2_4_2_5, 0.1_4_0_4_5, 0.1_3_1_1_5, 0.1_2_1_7_5, 0.1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
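The slow test below wires a pretrained UNet into a PNDM pipeline by hand. A minimal sketch of the same flow, assuming the public diffusers API (where UNet2DModel is the unobfuscated counterpart of the snippet's UNetaDModel):
# Hedged sketch: unconditional sampling with the PNDM scheduler.
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
pipe.set_progress_bar_config(disable=True)

generator = torch.manual_seed(0)  # fixed seed -> reproducible sample
image = pipe(
    generator=generator, num_inference_steps=20, output_type="numpy"
).images[0]
print(image.shape)  # (32, 32, 3) for the CIFAR-10 checkpoint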
| 229 | '''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
_A : Optional[Any] = logging.get_logger(__name__)
# General docstring
_A : Optional[Any] = '''ResNetConfig'''
# Base docstring
_A : Tuple = '''microsoft/resnet-50'''
_A : List[str] = [1, 2048, 7, 7]
# Image classification docstring
_A : str = '''microsoft/resnet-50'''
_A : Dict = '''tiger cat'''
_A : List[Any] = [
'''microsoft/resnet-50''',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "relu" ) -> Any:
super().__init__()
__lowerCAmelCase = nn.Convad(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , padding=kernel_size // 2 , bias=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = nn.BatchNormad(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor:
__lowerCAmelCase = self.convolution(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = self.normalization(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : ResNetConfig ) -> List[str]:
super().__init__()
__lowerCAmelCase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
__lowerCAmelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
__lowerCAmelCase = config.num_channels
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor:
__lowerCAmelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
__lowerCAmelCase = self.embedder(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = self.pooler(SCREAMING_SNAKE_CASE__ )
return embedding
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 ) -> Dict:
super().__init__()
__lowerCAmelCase = nn.Convad(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , stride=SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = nn.BatchNormad(SCREAMING_SNAKE_CASE__ )
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor:
__lowerCAmelCase = self.convolution(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = self.normalization(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "relu" ) -> Dict:
super().__init__()
__lowerCAmelCase = in_channels != out_channels or stride != 1
__lowerCAmelCase = (
ResNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity()
)
__lowerCAmelCase = nn.Sequential(
ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , activation=SCREAMING_SNAKE_CASE__ ) , )
__lowerCAmelCase = ACTaFN[activation]
def a ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> int:
__lowerCAmelCase = hidden_state
__lowerCAmelCase = self.layer(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
__lowerCAmelCase = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : str = "relu" , SCREAMING_SNAKE_CASE__ : int = 4 ) -> int:
super().__init__()
__lowerCAmelCase = in_channels != out_channels or stride != 1
__lowerCAmelCase = out_channels // reduction
__lowerCAmelCase = (
ResNetShortCut(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) if should_apply_shortcut else nn.Identity()
)
__lowerCAmelCase = nn.Sequential(
ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ ) , ResNetConvLayer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , kernel_size=1 , activation=SCREAMING_SNAKE_CASE__ ) , )
__lowerCAmelCase = ACTaFN[activation]
def a ( self : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
__lowerCAmelCase = hidden_state
__lowerCAmelCase = self.layer(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = self.shortcut(SCREAMING_SNAKE_CASE__ )
hidden_state += residual
__lowerCAmelCase = self.activation(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : ResNetConfig , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 2 , SCREAMING_SNAKE_CASE__ : int = 2 , ) -> int:
super().__init__()
__lowerCAmelCase = ResNetBottleNeckLayer if config.layer_type == """bottleneck""" else ResNetBasicLayer
__lowerCAmelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) , *[layer(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tensor ) -> Tensor:
__lowerCAmelCase = input
for layer in self.layers:
__lowerCAmelCase = layer(SCREAMING_SNAKE_CASE__ )
return hidden_state
class _lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE__ : ResNetConfig ) -> Optional[int]:
super().__init__()
__lowerCAmelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
SCREAMING_SNAKE_CASE__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__lowerCAmelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(SCREAMING_SNAKE_CASE__ , config.depths[1:] ):
self.stages.append(ResNetStage(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , depth=SCREAMING_SNAKE_CASE__ ) )
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = True ) -> BaseModelOutputWithNoAttention:
__lowerCAmelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowerCAmelCase = hidden_states + (hidden_state,)
__lowerCAmelCase = stage_module(SCREAMING_SNAKE_CASE__ )
if output_hidden_states:
__lowerCAmelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , hidden_states=SCREAMING_SNAKE_CASE__ , )
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : int = ResNetConfig
_SCREAMING_SNAKE_CASE : Union[str, Any] = """resnet"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = """pixel_values"""
_SCREAMING_SNAKE_CASE : Union[str, Any] = True
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
if isinstance(SCREAMING_SNAKE_CASE__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(SCREAMING_SNAKE_CASE__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ) -> int:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCAmelCase = value
_A : Dict = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
_A : Optional[int] = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , UpperCAmelCase__ , )
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
super().__init__(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = config
__lowerCAmelCase = ResNetEmbeddings(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = ResNetEncoder(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = self.embedder(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = self.encoder(
SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = encoder_outputs[0]
__lowerCAmelCase = self.pooler(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=SCREAMING_SNAKE_CASE__ , pooler_output=SCREAMING_SNAKE_CASE__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , UpperCAmelCase__ , )
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
super().__init__(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = config.num_labels
__lowerCAmelCase = ResNetModel(SCREAMING_SNAKE_CASE__ )
# classification head
__lowerCAmelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a ( self : int , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.LongTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = self.resnet(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = outputs.pooler_output if return_dict else outputs[1]
__lowerCAmelCase = self.classifier(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowerCAmelCase = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowerCAmelCase = """single_label_classification"""
else:
__lowerCAmelCase = """multi_label_classification"""
if self.config.problem_type == "regression":
__lowerCAmelCase = MSELoss()
if self.num_labels == 1:
__lowerCAmelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowerCAmelCase = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif self.config.problem_type == "single_label_classification":
__lowerCAmelCase = CrossEntropyLoss()
__lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowerCAmelCase = BCEWithLogitsLoss()
__lowerCAmelCase = loss_fct(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if not return_dict:
__lowerCAmelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=SCREAMING_SNAKE_CASE__ , logits=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
"""
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
""" , UpperCAmelCase__ , )
class _lowercase ( UpperCAmelCase__ , UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
super().__init__(SCREAMING_SNAKE_CASE__ )
super()._init_backbone(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = [config.embedding_size] + config.hidden_sizes
__lowerCAmelCase = ResNetEmbeddings(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = ResNetEncoder(SCREAMING_SNAKE_CASE__ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__ )
@replace_return_docstrings(output_type=SCREAMING_SNAKE_CASE__ , config_class=_CONFIG_FOR_DOC )
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Tensor , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None ) -> BackboneOutput:
__lowerCAmelCase = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCAmelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCAmelCase = self.embedder(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = self.encoder(SCREAMING_SNAKE_CASE__ , output_hidden_states=SCREAMING_SNAKE_CASE__ , return_dict=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = outputs.hidden_states
__lowerCAmelCase = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
__lowerCAmelCase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=SCREAMING_SNAKE_CASE__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=SCREAMING_SNAKE_CASE__ , )
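Both the basic and the bottleneck layers above compute activation(conv_path(x) + shortcut(x)), with a 1x1 projection shortcut whenever the block changes width or stride. A self-contained PyTorch sketch of that residual pattern, using standard torch names rather than the obfuscated ones above:
# Hedged sketch of the residual pattern implemented by the basic layer.
import torch
from torch import nn

class BasicResidualBlock(nn.Module):
    def __init__(self, in_ch: int, out_ch: int, stride: int = 1):
        super().__init__()
        self.path = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, 3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.Conv2d(out_ch, out_ch, 3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
        )
        # identity shortcut unless the block changes resolution or width
        self.shortcut = (
            nn.Sequential(
                nn.Conv2d(in_ch, out_ch, 1, stride=stride, bias=False),
                nn.BatchNorm2d(out_ch),
            )
            if (in_ch != out_ch or stride != 1)
            else nn.Identity()
        )
        self.act = nn.ReLU()

    def forward(self, x):
        return self.act(self.path(x) + self.shortcut(x))

block = BasicResidualBlock(64, 128, stride=2)
print(block(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 128, 28, 28])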
| 229 | 1 |
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class __a ( __lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : bool = False
_lowerCAmelCase : float = 3.0
class __a ( unittest.TestCase ):
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"a": 2} )
self.assertDictEqual(MockClass(a=2 , b=UpperCamelCase_ ).to_kwargs() , {"a": 2, "b": True} )
self.assertDictEqual(MockClass(a=2 , c=2.2_5 ).to_kwargs() , {"a": 2, "c": 2.2_5} )
@require_cuda
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
AcceleratorState._reset_state()
UpperCamelCase__ : str = Accelerator(mixed_precision="fp16" , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
UpperCamelCase__ : Union[str, Any] = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_0_2_4.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
self.assertEqual(scaler._enabled , UpperCamelCase_ )
@require_multi_gpu
def __lowercase ( self : List[str] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = ["torchrun", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] =DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowerCamelCase : Optional[int] =Accelerator(kwargs_handlers=[ddp_scaler])
lowerCamelCase : Dict =torch.nn.Linear(100, 200)
lowerCamelCase : Optional[int] =accelerator.prepare(model)
# Check the values changed in kwargs
lowerCamelCase : Optional[Any] =''''''
lowerCamelCase : Dict =model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg) | 357 |
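The to_kwargs() assertions in the snippet above check that only fields differing from the dataclass defaults are returned. A minimal sketch of that diff-against-defaults logic in plain dataclasses (a hypothetical helper, not accelerate's actual implementation):
# Hedged sketch: keep only the fields that were overridden, by comparing
# an instance against a freshly-constructed default instance.
from dataclasses import dataclass, fields

class KwargsDiffMixin:
    def to_kwargs(self):
        default = self.__class__()  # assumes every field has a default
        return {
            f.name: getattr(self, f.name)
            for f in fields(self)
            if getattr(self, f.name) != getattr(default, f.name)
        }

@dataclass
class MockClass(KwargsDiffMixin):
    a: int = 0
    b: bool = False
    c: float = 3.0

assert MockClass().to_kwargs() == {}
assert MockClass(a=2, b=True).to_kwargs() == {"a": 2, "b": True}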
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCamelCase : Optional[Any] =False
class __a ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __a ( unittest.TestCase ):
def __lowercase ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : int = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase__ : Dict = torch.manual_seed(0 )
UpperCamelCase__ : str = pipe.dual_guided(
prompt="first prompt" , image=SCREAMING_SNAKE_CASE , text_to_image_strength=0.7_5 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = generator.manual_seed(0 )
UpperCamelCase__ : Dict = pipe.dual_guided(
prompt="first prompt" , image=SCREAMING_SNAKE_CASE , text_to_image_strength=0.7_5 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=2 , output_type="numpy" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCamelCase__ : Optional[int] = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion" , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = "cyberpunk 2077"
UpperCamelCase__ : Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
UpperCamelCase__ : int = torch.manual_seed(0 )
UpperCamelCase__ : Any = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , text_to_image_strength=0.7_5 , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
UpperCamelCase__ : Tuple = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : List[str] = "A painting of a squirrel eating a burger "
UpperCamelCase__ : List[Any] = torch.manual_seed(0 )
UpperCamelCase__ : Dict = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" ).images
UpperCamelCase__ : Union[str, Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : Dict = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : Any = pipe.image_variation(SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , output_type="numpy" ).images
UpperCamelCase__ : Optional[Any] = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCamelCase__ : int = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 196 | 0 |
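The save/reload test above asserts that two runs seeded identically agree to within a tiny tolerance. A stripped-down sketch of that determinism check with plain torch and numpy (the random tensor stands in for a pipeline call that consumes the generator):
# Hedged sketch of the reproducibility check used above.
import numpy as np
import torch

def run_once(seed: int) -> np.ndarray:
    gen = torch.Generator().manual_seed(seed)
    return torch.randn(1, 3, 8, 8, generator=gen).numpy()

a, b = run_once(0), run_once(0)
assert np.abs(a - b).sum() < 1e-5, "Runs with the same seed should match"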
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ :Dict = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :Optional[Any] = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ :List[str] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
A_ :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
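_LazyModule defers the heavy imports listed above until an attribute is first accessed. A minimal sketch of the same idea using a module-level __getattr__ (PEP 562); this mirrors the concept, not transformers' actual implementation, and the module names here are stand-ins:
# Hedged sketch of lazy importing via module-level __getattr__ (PEP 562).
# Save as a package __init__.py; nothing heavy is imported until an
# attribute is first requested.
import importlib

_import_structure = {
    "json": ["dumps", "loads"],  # stand-in for heavy submodules
}
_attr_to_module = {
    attr: mod for mod, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")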
| 71 |
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def UpperCAmelCase_ ( ):
lowercase = Github(os.environ['GITHUB_TOKEN'] )
lowercase = g.get_repo('huggingface/diffusers' )
lowercase = repo.get_issues(state='open' )
for issue in open_issues:
lowercase = sorted(issue.get_comments() , key=lambda __SCREAMING_SNAKE_CASE : i.created_at , reverse=__SCREAMING_SNAKE_CASE )
lowercase = comments[0] if len(__SCREAMING_SNAKE_CASE ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
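The stalebot above keys every decision off two time windows: days since the last update and days since the issue was created. Pulling that predicate into a pure function makes it testable without the GitHub API (the helper name here is hypothetical):
# Hedged sketch: the stale decision above as a pure, testable predicate.
from datetime import datetime, timedelta

def should_mark_stale(
    updated_at: datetime, created_at: datetime, now: datetime
) -> bool:
    # mirrors the 23-day inactivity / 30-day age windows used above
    return (now - updated_at).days > 23 and (now - created_at).days >= 30

now = datetime(2024, 1, 31)
assert should_mark_stale(now - timedelta(days=24), now - timedelta(days=40), now)
assert not should_mark_stale(now - timedelta(days=5), now - timedelta(days=40), now)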
| 195 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
A : str = logging.get_logger(__name__)
A : str = """▁"""
A : List[str] = {"""vocab_file""": """sentencepiece.bpe.model"""}
A : List[str] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"""
),
}
}
A : str = {
"""facebook/nllb-200-distilled-600M""": 1_0_2_4,
}
# fmt: off
A : Optional[Any] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __A( lowerCAmelCase_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = ["""input_ids""", """attention_mask"""]
snake_case_ = []
snake_case_ = []
def __init__( self , _snake_case , _snake_case="<s>" , _snake_case="</s>" , _snake_case="</s>" , _snake_case="<s>" , _snake_case="<unk>" , _snake_case="<pad>" , _snake_case="<mask>" , _snake_case=None , _snake_case=None , _snake_case=None , _snake_case = None , _snake_case=None , _snake_case=False , **_snake_case , ) -> Dict:
'''simple docstring'''
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
__a = {} if sp_model_kwargs is None else sp_model_kwargs
__a = legacy_behaviour
super().__init__(
bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , src_lang=__SCREAMING_SNAKE_CASE , tgt_lang=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__SCREAMING_SNAKE_CASE ) )
__a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 tokens
__a = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__a = 1
__a = len(self.sp_model )
__a = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__SCREAMING_SNAKE_CASE )
}
__a = {v: k for k, v in self.lang_code_to_id.items()}
__a = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
__a = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__a = src_lang if src_lang is not None else '''eng_Latn'''
__a = self.lang_code_to_id[self._src_lang]
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Any:
'''simple docstring'''
__a = self.__dict__.copy()
__a = None
__a = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> Dict:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE , token_ids_a=__SCREAMING_SNAKE_CASE , already_has_special_tokens=__SCREAMING_SNAKE_CASE )
__a = [1] * len(self.prefix_tokens )
__a = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
return prefix_ones + ([0] * len(__SCREAMING_SNAKE_CASE )) + ([0] * len(__SCREAMING_SNAKE_CASE )) + suffix_ones
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Union[str, Any]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Any:
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , **_snake_case ) -> Dict:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
__a = self.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
__a = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
__a = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str:
'''simple docstring'''
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a = self.sp_model.PieceToId(__SCREAMING_SNAKE_CASE )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = ''''''.join(__SCREAMING_SNAKE_CASE ).replace(__SCREAMING_SNAKE_CASE , ''' ''' ).strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> str:
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = "eng_Latn" , _snake_case = None , _snake_case = "fra_Latn" , **_snake_case , ) -> Optional[Any]:
'''simple docstring'''
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE_ ( self ) -> Tuple:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_ ( self ) -> List[str]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Tuple:
'''simple docstring'''
__a = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
else:
__a = [self.cur_lang_code]
__a = [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
__a = self.lang_code_to_id[lang]
if self.legacy_behaviour:
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
else:
__a = [self.cur_lang_code]
__a = [self.eos_token_id] | 351 |
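In the tokenizer above, src_lang controls which language code is injected into the prefix/suffix special tokens, and the target language code is forced as the first generated token. A minimal translation sketch, assuming the public transformers API:
# Hedged sketch: English -> French translation with NLLB-200. The target
# language code is forced as the first generated token, matching the
# lang_code handling implemented above.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
)
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")

inputs = tokenizer("The weather is nice today.", return_tensors="pt")
out = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"),
    max_length=30,
)
print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])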
A : Optional[Any] = tuple[float, float, float]
A : Union[str, Any] = tuple[float, float, float]
def __lowerCAmelCase ( a__ , a__ ) -> Vectorad:
__a = end_pointa[0] - end_pointa[0]
__a = end_pointa[1] - end_pointa[1]
__a = end_pointa[2] - end_pointa[2]
return (x, y, z)
def __lowerCAmelCase ( a__ , a__ ) -> Vectorad:
__a = ab[1] * ac[2] - ab[2] * ac[1] # *i
__a = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
__a = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def __lowerCAmelCase ( a__ , a__ ) -> bool:
return tuple(round(x , a__ ) for x in vector ) == (0, 0, 0)
def __lowerCAmelCase ( a__ , a__ , a__ , a__ = 10 ) -> bool:
__a = create_vector(a__ , a__ )
__a = create_vector(a__ , a__ )
return is_zero_vector(get_ad_vectors_cross(a__ , a__ ) , a__ ) | 33 | 0 |
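The helpers above decide whether three points are collinear by checking that the cross product of the two spanning vectors vanishes (after rounding). A small worked check with concrete points, re-implemented with readable names:
# Hedged worked example of the collinearity test above: three points are
# collinear iff cross(AB, AC) == (0, 0, 0).
def cross(u, v):
    return (
        u[1] * v[2] - u[2] * v[1],
        u[2] * v[0] - u[0] * v[2],
        u[0] * v[1] - u[1] * v[0],
    )

def collinear(a, b, c, accuracy=10):
    ab = tuple(bb - aa for aa, bb in zip(a, b))
    ac = tuple(cc - aa for aa, cc in zip(a, c))
    return tuple(round(x, accuracy) for x in cross(ab, ac)) == (0, 0, 0)

assert collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))      # all on one line
assert not collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))  # spans a plane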
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def a__ ( ) -> int:
"""simple docstring"""
_UpperCamelCase = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
_UpperCamelCase = Dataset.from_dict(_A )
return dataset
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def snake_case__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = get_dataset()
_UpperCamelCase = make_duplicate_clusters(UpperCAmelCase__ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def snake_case__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = get_dataset()
_UpperCamelCase = deduplicate_dataset(UpperCAmelCase__ )
self.assertEqual(len(UpperCAmelCase__ ) , 2 )
print(UpperCAmelCase__ )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , UpperCAmelCase__ )
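The test above clusters near-identical files ("a " * 20 vs "a " * 30) while leaving distinct content alone. A minimal sketch of the underlying MinHash similarity, assuming the datasketch library (the minhash_deduplication module under test may be implemented differently):
# Hedged sketch of MinHash near-duplicate detection with datasketch.
from datasketch import MinHash

def minhash_of(text: str, num_perm: int = 128) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf8"))
    return m

a = minhash_of("a " * 20)  # same token set as "a " * 30 -> near duplicate
b = minhash_of("a " * 30)
c = minhash_of("b " * 7)
print(a.jaccard(b))  # ~1.0: clustered as duplicates, as in the test above
print(a.jaccard(c))  # ~0.0: distinct content stays separate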
| 324 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
UpperCAmelCase__ : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
UpperCAmelCase__ : Any = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
UpperCAmelCase__ : Optional[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __lowercase ( _A ) -> Tuple:
with open(_A , """rb""" ) as f:
SCREAMING_SNAKE_CASE : Any = Image.open(f )
return im.convert("""RGB""" )
@dataclass
class a__ :
"""simple docstring"""
UpperCAmelCase__ : Optional[str] =field(
default=UpperCAmelCase , metadata={
"""help""": """Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."""
} , )
UpperCAmelCase__ : Optional[str] =field(
default=UpperCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
UpperCAmelCase__ : Optional[str] =field(default=UpperCAmelCase , metadata={"""help""": """A folder containing the training data."""} )
UpperCAmelCase__ : Optional[str] =field(default=UpperCAmelCase , metadata={"""help""": """A folder containing the validation data."""} )
UpperCAmelCase__ : Optional[float] =field(
default=0.15 , metadata={"""help""": """Percent to split off of train for validation."""} )
UpperCAmelCase__ : Optional[int] =field(
default=UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
UpperCAmelCase__ : Optional[int] =field(
default=UpperCAmelCase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def _lowercase ( self : Optional[Any] ) ->Optional[Any]:
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class a__ :
"""simple docstring"""
UpperCAmelCase__ : str =field(
default="""google/vit-base-patch16-224-in21k""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , )
UpperCAmelCase__ : Optional[str] =field(
default=UpperCAmelCase , metadata={"""help""": """If training from scratch, pass a model type from the list: """ + """, """.join(UpperCAmelCase )} , )
UpperCAmelCase__ : Optional[str] =field(
default=UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
UpperCAmelCase__ : Optional[str] =field(
default=UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
UpperCAmelCase__ : str =field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
UpperCAmelCase__ : str =field(default=UpperCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} )
UpperCAmelCase__ : bool =field(
default=UpperCAmelCase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
UpperCAmelCase__ : bool =field(
default=UpperCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , )
def __lowercase ( _A ) -> str:
SCREAMING_SNAKE_CASE : Optional[Any] = torch.stack([example["""pixel_values"""] for example in examples] )
SCREAMING_SNAKE_CASE : int = torch.tensor([example["""labels"""] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __lowercase ( ) -> Dict:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""" , _A , _A )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="""image-classification""" , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["""train"""] = os.path.join(data_args.train_dir , """**""" )
        if data_args.validation_dir is not None:
            data_files["""validation"""] = os.path.join(data_args.validation_dir , """**""" )
        dataset = load_dataset(
            """imagefolder""" , data_files=data_files , cache_dir=model_args.cache_dir , task="""image-classification""" , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if """validation""" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , float ) and data_args.train_val_split > 0.0:
        split = dataset["""train"""].train_test_split(data_args.train_val_split )
        dataset["""train"""] = split["""train"""]
        dataset["""validation"""] = split["""test"""]
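    # For reference, `train_test_split` is the standard `datasets` API used above;
    # a tiny self-contained sketch with hypothetical toy data:
    def _demo_train_val_split():
        from datasets import Dataset
        toy = Dataset.from_dict({"labels": list(range(10))})
        toy_split = toy.train_test_split(test_size=0.2, seed=42)
        assert len(toy_split["train"]) == 8 and len(toy_split["test"]) == 2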
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["""train"""].features["""labels"""].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels ):
        label2id[label] = str(i )
        id2label[str(i )] = label
    # Load the accuracy metric from the datasets package
    metric = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p ):
        return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(labels ) , label2id=label2id , id2label=id2label , finetuning_task="""image-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
SCREAMING_SNAKE_CASE : Dict = image_processor.size["""shortest_edge"""]
else:
SCREAMING_SNAKE_CASE : Optional[int] = (image_processor.size["""height"""], image_processor.size["""width"""])
SCREAMING_SNAKE_CASE : List[str] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
SCREAMING_SNAKE_CASE : Optional[Any] = Compose(
[
RandomResizedCrop(_A ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
SCREAMING_SNAKE_CASE : List[Any] = Compose(
[
Resize(_A ),
CenterCrop(_A ),
ToTensor(),
normalize,
] )
def train_transforms(_A ):
SCREAMING_SNAKE_CASE : List[str] = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(_A ):
SCREAMING_SNAKE_CASE : Tuple = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
return example_batch
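    # `set_transform` (used below) applies these functions lazily, on item access,
    # so raw images are decoded and augmented only when read. A hedged sketch with
    # a hypothetical one-image dataset:
    def _demo_set_transform():
        from datasets import Dataset
        from PIL import Image
        toy = Dataset.from_dict({"image": [Image.new("""RGB""" , (64, 64) )], "labels": [0]})
        toy.set_transform(train_transforms )  # runs only when an example is accessed
        assert toy[0]["""pixel_values"""].shape[0] == 3  # channels-first tensor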
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
            dataset["""train"""] = (
                dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
            dataset["""validation"""] = (
                dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms )
    # Initialize our trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=dataset["""train"""] if training_args.do_train else None , eval_dataset=dataset["""validation"""] if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=image_processor , data_collator=collate_fn , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()
        trainer.log_metrics("""train""" , train_result.metrics )
        trainer.save_metrics("""train""" , train_result.metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
# Write model card and (optionally) push to hub
    kwargs = {
        """finetuned_from""": model_args.model_name_or_path,
        """tasks""": """image-classification""",
        """dataset""": data_args.dataset_name,
        """tags""": ["""image-classification""", """vision"""],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 245 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def make_batched( videos ):
    '''simple docstring'''
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}' )
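# How the helper above normalizes its input: a single frame, a flat list of frames,
# or a list of videos all come out as a list of videos (shapes here are illustrative).
def _demo_make_batched():
    import numpy as np
    frame = np.zeros((16, 16, 3) , dtype=np.uint8 )
    assert len(make_batched(frame ) ) == 1              # one video with one frame
    assert len(make_batched([frame, frame] ) ) == 1     # one video with two frames
    assert len(make_batched([[frame], [frame]] ) ) == 2  # already batched: two videos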
class _snake_case ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
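    @staticmethod
    def _demo_shortest_edge():
        # Hedged arithmetic sketch of the shortest-edge rule used above: the short
        # side maps to `shortest_edge` and the long side scales by the same factor
        # (the library helper may round slightly differently).
        height, width, shortest_edge = 480, 640, 224
        scale = shortest_edge / min(height , width )
        assert (int(height * scale ), int(width * scale )) == (224, 298)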
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        '''simple docstring'''
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , ) -> np.ndarray:
        '''simple docstring'''
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
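# End-to-end sketch of the processor above on one fake 8-frame video. The 224x224
# output assumes the default size/crop from __init__; `_snake_case` is simply the
# class name as defined in this sample.
def _demo_video_processor():
    import numpy as np
    video = [np.random.randint(0 , 256 , (360, 640, 3) , dtype=np.uint8 ) for _ in range(8 )]
    processor = _snake_case()
    batch = processor(video , return_tensors="np" )
    assert batch["pixel_values"].shape == (1, 8, 3, 224, 224)  # batch, frames, C, H, W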
| 92 |
'''simple docstring'''
def is_sum_subset( arr : list[int] , required_sum : int ) -> bool:
    '''simple docstring'''
    arr_len = len(arr )
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1 )]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1 ):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1 , required_sum + 1 ):
        subset[0][i] = False
    for i in range(1 , arr_len + 1 ):
        for j in range(1 , required_sum + 1 ):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
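# Worked example: [3, 34, 4, 12, 5, 2] has a subset summing to 9 (4 + 5) but none
# summing to 30 (the maximum without 34 is 26, and any subset containing 34 overshoots).
assert is_sum_subset([3, 34, 4, 12, 5, 2] , 9 ) is True
assert is_sum_subset([3, 34, 4, 12, 5, 2] , 30 ) is False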
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self ,vocab_size=26_7735 ,cutoffs=[2_0000, 4_0000, 20_0000] ,d_model=1024 ,d_embed=1024 ,n_head=16 ,d_head=64 ,d_inner=4096 ,div_val=4 ,pre_lnorm=False ,n_layer=18 ,mem_len=1600 ,clamp_len=1000 ,same_length=True ,proj_share_all_but_first=True ,attn_type=0 ,sample_softmax=-1 ,adaptive=True ,dropout=0.1 ,dropatt=0.0 ,untie_r=True ,init="normal" ,init_range=0.01 ,proj_init_std=0.01 ,init_std=0.02 ,layer_norm_epsilon=1e-5 ,eos_token_id=0 ,**kwargs ,) -> None:
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id ,**kwargs )
@property
    def max_position_embeddings( self ) -> int:
# Message copied from Transformer-XL documentation
logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self ,value : int ) -> None:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F'The model {self.model_type} is one of the few models that has no sequence length limit.' ) | 74 |
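# Hedged usage sketch for the config above: the defaults come straight from the
# signature, and tie_projs is derived as one flag for the head cluster plus one per cutoff.
def _demo_transfo_xl_config():
    config = TransfoXLConfig()
    assert config.vocab_size == 267735
    assert config.cutoffs == [20000, 40000, 200000]
    assert config.tie_projs == [False, True, True, True]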
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator : Accelerator , batch_size : int = 16 , model_name_or_path : str = "bert-base-cased" ):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path )
    datasets = load_dataset('glue' , 'mrpc' )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=False )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
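# Hedged usage sketch (downloads GLUE/MRPC on first call, so it is defined rather
# than run at import time):
def _demo_get_dataloaders():
    accelerator = Accelerator()
    train_dl, eval_dl = get_dataloaders(accelerator , batch_size=16 )
    batch = next(iter(train_dl ) )
    # (16, seq_len) -- seq_len varies per batch with 'longest' padding
    print(batch['input_ids'].shape )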
def training_function( config , args ):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name_or_path = args.model_name_or_path
    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size , model_name_or_path )
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path , return_dict=True )
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() , lr=lr )
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=0 , num_training_steps=max_training_steps , )
    else:
        lr_scheduler = DummyScheduler(optimizer , total_num_steps=max_training_steps , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue' , 'mrpc' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch , num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions , references=references , )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' , eval_metric )
        performance_metric[F'epoch-{epoch}'] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , 'all_results.json' ) , 'w' ) as f:
            json.dump(performance_metric , f )
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' , type=str , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=False , )
    parser.add_argument(
        '--output_dir' , type=str , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
    parser.add_argument(
        '--performance_lower_bound' , type=float , default=None , help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' , )
    parser.add_argument(
        '--num_epochs' , type=int , default=3 , help='Number of train epochs.' , )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config , args )
if __name__ == "__main__":
main() | 74 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
snake_case = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
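    # Hedged sketch of the same pattern for a hypothetical package: replacing the
    # module object in sys.modules with a _LazyModule makes `import mypkg` cheap,
    # and the heavy submodule is imported only when an attribute is first accessed.
    #
    #     _import_structure = {"my_submodule": ["MyClass"]}
    #     sys.modules[__name__] = _LazyModule(
    #         __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    #     )
    #     # later: `mypkg.MyClass` triggers `import mypkg.my_submodule`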
| 319 |
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
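# The width/depth coefficients above are EfficientNet's compound-scaling knobs. A
# hedged sketch of the standard channel-rounding rule (the paper's round_filters;
# this helper is illustrative, not a function taken from this script):
def _demo_round_filters(filters , width_coef , divisor=8 ):
    filters *= width_coef
    new_filters = max(divisor , int(filters + divisor / 2 ) // divisor * divisor )
    if new_filters < 0.9 * filters:  # never round down by more than ~10%
        new_filters += divisor
    return new_filters
# e.g. _demo_round_filters(32, 1.0) == 32 for b0, _demo_round_filters(32, 1.4) == 48 for b4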
def get_efficientnet_config( model_name ):
    """simple docstring"""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img( ):
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def convert_image_processor( model_name ):
    """simple docstring"""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47853944, 0.4732864, 0.47434163] , do_center_crop=False , )
    return preprocessor
def rename_keys( original_param_names ):
    """simple docstring"""
    block_names = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
    block_names = sorted(set(block_names ) )
    num_blocks = len(block_names )
    block_name_mapping = {b: str(i ) for b, i in zip(block_names , range(num_blocks ) )}
    rename_keys = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params( hf_params , tf_params , key_mapping ):
    """simple docstring"""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(3 , 2 , 0 , 1 )
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value ).permute(2 , 3 , 0 , 1 )
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value ) )
        else:
            new_hf_value = torch.from_numpy(value )
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value )
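# Why the permutes above: TF stores conv kernels as (kh, kw, in, out) while PyTorch
# expects (out, in, kh, kw); depthwise kernels go (kh, kw, ch, mult) -> (ch, mult, kh, kw).
def _demo_kernel_layouts():
    tf_kernel = np.zeros((3, 3, 16, 32) , dtype=np.float32 )
    assert tuple(torch.from_numpy(tf_kernel ).permute(3 , 2 , 0 , 1 ).shape ) == (32, 16, 3, 3)
    tf_dw = np.zeros((3, 3, 16, 1) , dtype=np.float32 )
    assert tuple(torch.from_numpy(tf_dw ).permute(2 , 3 , 0 , 1 ).shape ) == (16, 1, 3, 3)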
@torch.no_grad()
def convert_efficientnet_checkpoint( model_name , pytorch_dump_folder_path , save_model , push_to_hub ):
    """simple docstring"""
    original_model = model_classes[model_name](
        include_top=True , weights="imagenet" , input_tensor=None , input_shape=None , pooling=None , classes=1000 , classifier_activation="softmax" , )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys() )
    # Load HuggingFace model
    config = get_efficientnet_config(model_name )
    hf_model = EfficientNetForImageClassification(config ).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters..." )
    key_mapping = rename_keys(tf_param_names )
    replace_params(hf_params , tf_params , key_mapping )
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name )
    inputs = preprocessor(images=prepare_img() , return_tensors="pt" )
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs )
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
    x = image.img_to_array(img )
    x = np.expand_dims(x , axis=0 )
    original_logits = original_model.predict(x )
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits , hf_logits , atol=1E-3 ), "The predicted logits are not the same."
    print("Model outputs match!" )
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path ):
            os.mkdir(pytorch_dump_folder_path )
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path )
        preprocessor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Push model and image processor to hub
        print(F'''Pushing converted {model_name} to the hub...''' )
        model_name = F'''efficientnet-{model_name}'''
        preprocessor.push_to_hub(model_name )
        hf_model.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 319 | 1 |