import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    """Constructs a SpeechT5 feature extractor for waveform inputs and log-mel spectrogram targets."""

    model_input_names = ["input_values", "attention_mask"]
    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 16000,
        padding_value: float = 0.0,
        do_normalize: bool = False,
        num_mel_bins: int = 80,
        hop_length: int = 16,
        win_length: int = 64,
        win_function: str = "hann_window",
        frame_signal_scale: float = 1.0,
        fmin: float = 80,
        fmax: float = 7600,
        mel_floor: float = 1e-10,
        reduction_factor: int = 2,
        return_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.num_mel_bins,
            min_frequency=self.fmin,
            max_frequency=self.fmax,
            sampling_rate=self.sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        """Normalize every array in the list to zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values
    def _extract_mel_features(
        self,
        one_waveform: np.ndarray,
    ) -> np.ndarray:
        """Extracts log-mel filterbank features for one waveform array (unbatched)."""
        log_mel_spec = spectrogram(
            one_waveform,
            window=self.window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            mel_filters=self.mel_filters,
            mel_floor=self.mel_floor,
            log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(
        self,
        audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize and prepare one or several audio sequence(s) and/or spectrogram target(s) for the model."""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if audio is not None:
            inputs = self._process_audio(
                audio,
                False,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target,
                True,
                padding,
                max_length,
                truncation,
                pad_to_multiple_of,
                return_attention_mask,
                return_tensors,
                **kwargs,
            )

            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs
    def _process_audio(
        self,
        speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        is_target: bool = False,
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            speech = [np.asarray(s, dtype=np.float32) for s in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
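# ---------------------------------------------------------------------------
# A minimal usage sketch (not part of the original file); the restored class
# name and the dummy waveform are illustrative assumptions:
#
#     import numpy as np
#
#     extractor = SpeechT5FeatureExtractor()
#     waveform = np.random.randn(16000).astype(np.float32)  # 1 s at 16 kHz
#
#     # waveform inputs for the speech encoder
#     inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#     # log-mel spectrogram targets (num_mel_bins=80 by default) for TTS training
#     targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
# ---------------------------------------------------------------------------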
import inspect
import unittest
from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict
@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None
def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
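# A worked illustration of the normalization pipeline above (comments only):
#
#     normalize_answer("The Eiffel Tower!")
#     # lower           -> "the eiffel tower!"
#     # remove_punc     -> "the eiffel tower"
#     # remove_articles -> "  eiffel tower"
#     # white_space_fix -> "eiffel tower"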
def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
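# A worked example of the token-level F1 above (comments only):
#
#     gold = "the cat sat on the mat"  -> normalized tokens: [cat, sat, on, mat]
#     pred = "cat sat mat"             -> normalized tokens: [cat, sat, mat]
#     num_same = 3; precision = 3/3 = 1.0; recall = 3/4 = 0.75
#     F1 = 2 * 1.0 * 0.75 / (1.0 + 0.75) ≈ 0.857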
def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )
def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh
def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
__snake_case : List[str] =parse_args()
if OPTS.out_image_dir:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
main()
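# A hypothetical invocation sketch (file names are placeholders, not from the
# script above):
#
#     python evaluate-v2.0.py dev-v2.0.json predictions.json \
#         --na-prob-file na_probs.json --out-file eval.json
#
# When --na-prob-file is given, the script additionally reports the best
# exact/F1 achievable over all no-answer thresholds (best_exact_thresh,
# best_f1_thresh).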
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared Euclidean norm of a vector."""
    return np.dot(vector, vector)
class SVC:
    """Support vector classifier trained by maximizing Wolfe's dual formulation."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel: plain dot product."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF kernel: exp(-gamma * ||v1 - v2||^2)."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))
    def fit(self, observations: list, classes: ndarray) -> None:
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            # opposite of the dual objective, so that minimizing it maximizes the dual
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(observations[i], observations[j])
        self.offset = s / n
    def predict(self, observation: ndarray) -> int:
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
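# A minimal usage sketch (toy data made up for illustration):
#
#     xs = [np.asarray([0.0, 1.0]), np.asarray([0.0, 2.0]),
#           np.asarray([1.0, 1.0]), np.asarray([1.0, 2.0])]
#     ys = np.asarray([1, 1, -1, -1])
#     svc = SVC(kernel="linear")
#     svc.fit(xs, ys)
#     svc.predict(np.asarray([0.0, 1.5]))  # expected:  1
#     svc.predict(np.asarray([1.0, 1.5]))  # expected: -1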
if __name__ == "__main__":
import doctest
doctest.testmod()
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper over token sequences: stores the sequences and their lengths, and cleans them up."""

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))
    def remove_long_sequences(self):
        """Sequences that are too long are split into multiple sequences."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")
    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process does this."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
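# A minimal usage sketch (not part of the original file); `params` is assumed
# to be the distillation argparse namespace providing `max_model_input_size`,
# `mlm`, `special_tok_ids`, and `is_master`:
#
#     from torch.utils.data import DataLoader
#
#     dataset = LmSeqsDataset(params=params, data=token_id_sequences)
#     loader = DataLoader(
#         dataset,
#         batch_size=32,
#         shuffle=True,
#         collate_fn=dataset.batch_sequences,  # pads each batch to its longest sequence
#     )
#     for token_ids, lengths in loader:
#         ...  # token_ids: (bs, max_len) LongTensor, lengths: (bs,)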
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v):
    """Convert a command-line string to a boolean."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one consistency-model ResNet block into diffusers naming."""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Split the fused qkv projection of one attention block into diffusers naming."""
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
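# A small illustration (not part of the conversion script) of the qkv split
# above, with a made-up channel count C = 4:
#
#     import torch
#     qkv = torch.randn(3 * 4, 4, 1, 1)          # fused 1x1-conv weight
#     w_q, w_k, w_v = qkv.chunk(3, dim=0)        # three (4, 4, 1, 1) tensors
#     print(w_q.squeeze(-1).squeeze(-1).shape)   # torch.Size([4, 4]) linear weight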
def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Convert a consistency-model UNet checkpoint into a diffusers state dict."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
a :Tuple = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
a :Optional[Any] = parser.parse_args()
a :Union[str, Any] = strabool(args.class_cond)
a :List[Any] = os.path.basename(args.unet_path)
print(f'Checkpoint: {ckpt_name}')
# Get U-Net config
if "imagenet64" in ckpt_name:
a :Union[str, Any] = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a :List[str] = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
a :Tuple = TEST_UNET_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
if not args.class_cond:
a :List[str] = None
a :str = con_pt_to_diffuser(args.unet_path, unet_config)
a :Union[str, Any] = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
a :Tuple = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
a :Tuple = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
a :str = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f'Checkpoint type {ckpt_name} is not currently supported.')
a :List[str] = CMStochasticIterativeScheduler(**scheduler_config)
a :Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
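# A hypothetical invocation (the checkpoint file name is a placeholder; the
# "cd"/"imagenet64" substrings are what the branches above key on):
#
#     python convert_consistency_to_diffusers.py \
#         --unet_path cd_imagenet64_l2.pt \
#         --dump_path converted/cd_imagenet64_l2 \
#         --class_cond True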
"""simple docstring"""
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate,
        decay_schedule_fn,
        warmup_steps,
        power=1.0,
        name=None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
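# A minimal sketch of the schedule's shape (not from the original module),
# assuming 10 warmup steps, a peak LR of 1e-3, and a placeholder decay:
#
#     schedule = WarmUp(
#         initial_learning_rate=1e-3,
#         decay_schedule_fn=lambda step: tf.constant(1e-3),  # placeholder decay
#         warmup_steps=10,
#     )
#     float(schedule(5))   # 5/10 * 1e-3 = 5e-4 (linear warmup, power=1.0)
#     float(schedule(20))  # past warmup: decay_schedule_fn(20 - 10) = 1e-3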
def create_optimizer(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a linear decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
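# A short usage sketch (hyperparameters are illustrative, and `model` is an
# assumed compiled Keras model, not defined in this module):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5,
#         num_train_steps=10_000,
#         num_warmup_steps=1_000,
#         weight_decay_rate=0.01,
#     )
#     model.compile(optimizer=optimizer, loss=...)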
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied to all parameters except those matched by the exclude patterns."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        # Creates an optimizer from its config with WarmUp as a custom object.
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        # Retrieves the learning rate with the given state.
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        # Whether to apply L2 weight decay to `param_name`.
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several mini-batches; call `reset()` after applying them."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        # Number of accumulated steps.
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        # The accumulated gradients on the current replica.
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        # Accumulates `gradients` on the current replica.
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        # Resets the accumulated gradients on the current replica.
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
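# Editorial sketch of how the accumulator is typically driven inside a custom
# training loop: gradients are summed over several micro-batches, applied once
# (unscaled, as upstream does), then reset. `model`, `loss_fn`, `optimizer`
# and `batches` are placeholders supplied by the caller.
def _example_accumulate(model, loss_fn, optimizer, batches, accum_steps=4):
    accumulator = GradientAccumulator()
    for step, (x, y) in enumerate(batches, start=1):
        with tf.GradientTape() as tape:
            loss = loss_fn(y, model(x, training=True))
        accumulator(tape.gradient(loss, model.trainable_variables))
        if step % accum_steps == 0:
            optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
            accumulator.reset()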
| 56 | 1 |
"""simple docstring"""
fast27_timesteps = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
smart27_timesteps = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
smart50_timesteps = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
smart100_timesteps = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
smart185_timesteps = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
super27_timesteps = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
super40_timesteps = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
super100_timesteps = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
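# Editorial note: hand-tuned schedules like these are meant to be passed to a
# pipeline that accepts an explicit `timesteps` list (the DeepFloyd IF
# pipelines do). The call below is an illustrative sketch, not part of this
# module:
#
#   pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
#   image = pipe("a photo of a cat", timesteps=fast27_timesteps).images[0]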
| 106 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
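# Editorial sketch of how test_patching.py exercises this module; the exact
# `patch_submodule` helper lives in datasets' test utilities, so treat the
# call below as illustrative only:
#
#   import _test_patching
#   with patch_submodule(_test_patching, "os.path.join", lambda *p: "mock"):
#       assert _test_patching.join("a", "b") == "mock"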
| 113 | 0 |
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        # Drop trailing zero dual coefficients.
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
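# A small worked example (editorial): the dual part of f(x + E) carries the
# derivatives of f at x, which is exactly what `differentiate` below exploits.
def _example_dual_arithmetic():
    x = Dual(3, 1)          # 3 + 1*E, i.e. evaluate at x = 3
    y = x * x + 2           # (3 + E)^2 + 2 = 11 + 6E + E^2
    assert y.real == 11
    assert y.duals[0] == 6  # d/dx (x^2 + 2) at x = 3
    return y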
def differentiate(func, position, order):
    """Differentiate a real-valued function at a point to an arbitrary order using dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
| 371 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="[SEP]",
        eos_token="[SEP]",
        sep_token="[SEP]",
        unk_token="[UNK]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> str:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
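# Editorial usage sketch; it assumes the checkpoint referenced in
# PRETRAINED_VOCAB_FILES_MAP above is reachable on the Hub.
#
#   tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
#   ids = tokenizer("Hello world")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))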
| 10 | 0 |
from ..utils import DummyObject, requires_backends
# The class name below is an assumption: this excerpt lost the original
# identifiers, and the upstream dummy-objects module defines one such class
# per public Flax object.
class FlaxControlNetModel(metaclass=DummyObject):
    _backends = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax"])
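# Editorial sketch of the dummy-object behavior: without the `flax` backend
# installed, any use raises an informative ImportError via `requires_backends`.
#
#   try:
#       FlaxControlNetModel.from_pretrained("some/repo")
#   except ImportError as err:
#       print(err)  # tells the user to install the missing `flax` dependency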
| 278 |
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets."
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument("--shard_size", type=int, default=1000, help="Number of entries to go in a single shard.")
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument("--limit", default=None, type=int, help="Limit the number of shards (used for debugging).")
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        default="tf-tpu",
        type=str,
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )

    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn


def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
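# Editorial sketch of the read side: parsing the shards back with tf.data.
# The feature spec mirrors what `get_serialized_examples` wrote above;
# `max_length` must match the value used when the shards were produced.
def load_shards(pattern, max_length=512):
    spec = {
        "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
        "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
    }
    ds = tf.data.TFRecordDataset(tf.io.gfile.glob(pattern))
    return ds.map(lambda record: tf.io.parse_single_example(record, spec))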
| 224 | 0 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)
failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3_000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)

    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]

    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
| 244 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2_048, decoder_ffn_dim=2_048,
        num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# we split up the fused qkv matrix of each Swin block into queries, keys and values
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
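# Editorial sanity check for the slicing logic above: a fused qkv matrix of
# shape (3 * dim, dim) splits into three (dim, dim) blocks in q, k, v order,
# and concatenating them back reproduces the original matrix.
def _example_qkv_split(dim=4):
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v]), fused)
    return q, k, v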
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original model's weights to our DETA structure.
    """
    # load config
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
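# Editorial sketch of running the converted checkpoint afterwards; the repo id
# assumes the model was pushed with --push_to_hub as above.
#
#   model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large")
#   processor = DetaImageProcessor.from_pretrained("jozhang97/deta-swin-large")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   outputs = model(**inputs)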
| 244 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
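# Standalone sketch of what the integration test above exercises (illustrative
# only: it needs a GPU and downloads the full prior + decoder weights):
#
#   import torch
#   from diffusers import KandinskyV22PriorPipeline, KandinskyV22Img2ImgPipeline
#   from diffusers.utils import load_image
#
#   prior = KandinskyV22PriorPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
#   ).to("cuda")
#   decoder = KandinskyV22Img2ImgPipeline.from_pretrained(
#       "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
#   ).to("cuda")
#   init_image = load_image(
#       "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/kandinsky/cat.png"
#   )
#   emb, neg_emb = prior("A red cartoon frog, 4k", negative_prompt="").to_tuple()
#   frog = decoder(
#       image=init_image, image_embeds=emb, negative_image_embeds=neg_emb, strength=0.3
#   ).images[0]
#   frog.save("frog.png")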
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
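

if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original
    # module): an adaptive softmax over a 10k-token vocabulary with cutoffs
    # [2000, 6000]; d_embed == d_proj, so no projection matrices are needed.
    crit = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=32, d_proj=32, cutoffs=[2000, 6000], div_val=1)
    hidden = torch.randn(4, 8, 32)  # (batch, seq_len, d_proj)
    labels = torch.randint(0, 10000, (4, 8))
    nll = crit(hidden, labels)  # per-position negative log-likelihoods
    print(nll.shape)  # torch.Size([28]) == batch * (seq_len - 1) after the shift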
"""simple docstring"""
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, '''w''') as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, '''w''') as fp:
fp.write('''\n'''.join(merges))
    tokenizer = FSMTTokenizer(
langs=['''en''', '''ru'''],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=['''ru''', '''en'''],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
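
# Added sanity check: reload the artifact that save_pretrained just wrote to
# the local ./tiny-wmt19-en-ru folder.
reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
reloaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
print("reloaded num of params", reloaded_model.num_parameters())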
"""simple docstring"""
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
'''torch''',
'''numpy''',
'''tokenizers''',
'''filelock''',
'''requests''',
'''tqdm''',
'''regex''',
'''sentencepiece''',
'''sacremoses''',
'''importlib_metadata''',
'''huggingface_hub''',
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
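

# These module-level entry points are what torch.hub discovers in a hubconf.py;
# a typical consumer call looks like (model id is just an example, and network
# access is required):
#
#   import torch
#   tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#   mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")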
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"


def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    bort_4_8_768_1024_hparams = {
"""attention_cell""": """multi_head""",
"""num_layers""": 4,
"""units""": 1024,
"""hidden_size""": 768,
"""max_length""": 512,
"""num_heads""": 8,
"""scaled""": True,
"""dropout""": 0.1,
"""use_residual""": True,
"""embed_size""": 1024,
"""embed_dropout""": 0.1,
"""word_embed""": None,
"""layer_norm_eps""": 1e-5,
"""token_type_vocab_size""": 2,
}
    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
"""architectures""": ["""BertForMaskedLM"""],
"""attention_probs_dropout_prob""": predefined_args["""dropout"""],
"""hidden_act""": """gelu""",
"""hidden_dropout_prob""": predefined_args["""dropout"""],
"""hidden_size""": predefined_args["""embed_size"""],
"""initializer_range""": 0.02,
"""intermediate_size""": predefined_args["""hidden_size"""],
"""layer_norm_eps""": predefined_args["""layer_norm_eps"""],
"""max_position_embeddings""": predefined_args["""max_length"""],
"""model_type""": """bort""",
"""num_attention_heads""": predefined_args["""num_heads"""],
"""num_hidden_layers""": predefined_args["""num_layers"""],
"""pad_token_id""": 1, # 2 = BERT, 1 = RoBERTa
"""type_vocab_size""": 1, # 2 = BERT, 1 = RoBERTa
"""vocab_size""": len(lowerCAmelCase_ ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
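# Example invocation of this script (paths are hypothetical):
#   python this_script.py \
#       --bort_checkpoint_path /path/to/bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch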
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod() | 233 | 1 |
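    # Worked example (hand-checked): 100 bytes over 4 partitions is 25 bytes
    # each, with the final range absorbing any remainder.
    print(allocation_num(100, 4))  # ['1-25', '26-50', '51-75', '76-100']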
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily build change for `value` from the largest denominations down."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append the "answers" array

    return answer
# Driver Code
if __name__ == "__main__":
__magic_name__ = []
__magic_name__ = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
__magic_name__ = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f'''Denomination {i}: ''').strip()))
__magic_name__ = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
__magic_name__ = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
__magic_name__ = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f'''Following is minimal change for {value}: ''')
__magic_name__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
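
# Worked example (hand-checked):
#   find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
#   -> [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
# The greedy strategy is only optimal for canonical coin systems like this one;
# for denominations [1, 3, 4] and value "6" it returns [4, 1, 1] rather than
# the optimal [3, 3].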
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    """Create an empty (all-dead) square canvas."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """Randomly mark each cell of the canvas as alive or dead."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the canvas by one generation of Conway's Game of Life."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
__magic_name__ = int(sys.argv[1])
# main working structure of this module.
__magic_name__ = create_canvas(canvas_size)
seed(c)
__magic_name__ , __magic_name__ = plt.subplots()
fig.show()
__magic_name__ = ListedColormap(["w", "k"])
try:
while True:
__magic_name__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
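
# Doctest-style sanity check (hand-verified; note only interior cells get a
# correct neighbourhood, since negative slice starts fall off the grid edge):
#   >>> blinker = create_canvas(5)
#   >>> for j in range(1, 4):
#   ...     blinker[2][j] = True
#   >>> nxt = run(blinker)
#   >>> [nxt[r][2] for r in (1, 2, 3)]   # the horizontal bar flips vertical
#   [True, True, True]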
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# for checking whether the graph has a euler path or circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)
if __name__ == "__main__":
main()
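
# Hand-checked expectations for the sample graphs in main(): g1 has exactly two
# odd-degree vertices (1 and 5), so it reports a Euler path; g2 and g4 have all
# even degrees (Euler cycles); g3 has four odd-degree vertices and is rejected
# as not Eulerian; the edgeless g5 trivially reports a cycle of just node 1.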
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
    "tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
        "MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MvpForCausalLM",
        "MvpForConditionalGeneration",
        "MvpForQuestionAnswering",
        "MvpForSequenceClassification",
        "MvpModel",
        "MvpPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
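
# With the registration above, importing this package stays cheap: _LazyModule
# imports a submodule only when one of its symbols is first accessed. A
# simplified sketch of that deferral (illustrative, not the real _LazyModule):
#
#   import importlib
#
#   class LazyModuleSketch:
#       def __init__(self, package, import_structure):
#           self._package = package
#           self._map = {sym: mod for mod, syms in import_structure.items() for sym in syms}
#
#       def __getattr__(self, symbol):
#           submodule = importlib.import_module("." + self._map[symbol], self._package)
#           return getattr(submodule, symbol)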
UpperCamelCase__ = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
        AltDiffusionPipeline,
        AudioLDMPipeline,
        CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
        IFInpaintingPipeline,
        IFInpaintingSuperResolutionPipeline,
        IFPipeline,
        IFSuperResolutionPipeline,
        ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
        KandinskyInpaintPipeline,
        KandinskyPipeline,
        KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
        LDMTextToImagePipeline,
        PaintByExamplePipeline,
        SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
        ShapEPipeline,
        StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
        StableDiffusionControlNetInpaintPipeline,
        StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
        StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
        StableDiffusionModelEditingPipeline,
        StableDiffusionPanoramaPipeline,
        StableDiffusionParadigmsPipeline,
        StableDiffusionPipeline,
        StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
        StableDiffusionSAGPipeline,
        StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
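
# Every try/except above follows the same optional-dependency pattern: probe
# for a backend, and if it is missing expose dummy objects that fail loudly at
# construction time instead of at import time. A simplified sketch (the real
# dummy objects in diffusers.utils use a DummyObject metaclass):
#
#   try:
#       if not (is_torch_available() and is_scipy_available()):
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       class LMSDiscreteScheduler:  # stand-in that raises a helpful error
#           def __init__(self, *args, **kwargs):
#               raise ImportError("LMSDiscreteScheduler requires torch and scipy.")
#   else:
#       from .schedulers import LMSDiscreteScheduler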
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
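

# Typical construction (model id and data path are illustrative):
#
#   from transformers import AutoTokenizer
#
#   data_args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   train_dataset = SquadDataset(data_args, tokenizer=tokenizer, mode="train")
#   batch = train_dataset[0]  # dict of input_ids / attention_mask / ... tensors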
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
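

# Usage sketch (downloads the real sentencepiece model for facebook/xglm-564M):
#
#   tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#   enc = tok("Hello world")
#   # input_ids begin with the </s> separator that
#   # build_inputs_with_special_tokens prepends; decode() round-trips the text.
#   print(tok.decode(enc["input_ids"]))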
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)
        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")
        encoding = BatchFeature()
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")
        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)
return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
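# Hedged usage sketch for the processor above; the checkpoint id and image path are
# assumptions for illustration only. A single call tokenizes the text twice (once for the
# language model, once for the Q-Former) and preprocesses the image, exactly as __call__ does.
#
# from PIL import Image
# processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")  # assumed id
# inputs = processor(images=Image.open("photo.jpg"), text="What is shown here?", return_tensors="pt")
# # inputs now holds pixel_values, input_ids/attention_mask and qformer_input_ids/qformer_attention_mask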
| 77 | 1 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCAmelCase_ = """true"""
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Returns everything needed to perform basic training"""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Builds the MRPC validation dataloader used by the metric tests."""
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Prepares a plain and a DDP-wrapped MRPC model/dataloader pair."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Runs inference and gathers (logits, targets) across processes."""
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
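# Self-contained sketch (with assumed process/batch counts) of the padding problem this
# script exercises: with uneven shards the distributed sampler duplicates trailing samples,
# and gather_for_metrics is what trims those duplicates so len(logits) equals the dataset size.
num_processes, dataset_len, batch_size = 2, 82, 16
per_process = -(-dataset_len // num_processes)       # ceil: 41 samples per process
padded = -(-per_process // batch_size) * batch_size  # 48 after batch padding
gathered = padded * num_processes                    # 96 raw rows come back from all_gather
assert gathered - dataset_len == 14                  # the padding gather_for_metrics drops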
| 371 |
def binomial_coefficient(n, r):
    """Computes C(n, r) with a rolling row of Pascal's triangle."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
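# Worked check of the rolling-row recurrence above: each pass over i folds row i of
# Pascal's triangle into `c` from right to left, so c[r] ends as C(n, r).
# C(10, 5) = 10! / (5! * 5!) = 252.
from math import comb  # Python 3.8+

assert binomial_coefficient(n=10, r=5) == comb(10, 5) == 252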
| 295 | 0 |
"""simple docstring"""
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
'.csv': ('csv', {}),
'.tsv': ('csv', {'sep': '\t'}),
'.json': ('json', {}),
'.jsonl': ('json', {}),
'.parquet': ('parquet', {}),
'.arrow': ('arrow', {}),
'.txt': ('text', {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''') | 160 |
"""simple docstring"""
import os
def solution():
    """Returns the total of all the name scores in p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
    names = names.replace('"', "").split(",")
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
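# Worked example of the scoring rule above: each letter maps to ord(letter) - 64
# (A=1 ... Z=26) and the sum is multiplied by the 1-based position in the sorted list.
# "COLIN" scores 3 + 15 + 12 + 9 + 14 = 53; at position 938 it contributes
# 938 * 53 = 49714 (the example given in Project Euler problem 22).
assert sum(ord(c) - 64 for c in "COLIN") == 53
assert 938 * 53 == 49714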
| 144 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """Returns the minimum number of moves needed to give every node exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
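# Quick sanity check of the function above (hypothetical tree, not from the original module):
# a root holding 3 coins with two empty children needs 2 moves, one coin pushed down each edge.
if __name__ == "__main__":
    demo_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(demo_root) == 2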
| 362 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
{
'id': ['0', '1'],
'text': ['foo', 'bar'],
'title': ['Foo', 'Bar'],
'embeddings': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('embeddings', string_factory='Flat', metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)

    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])

    def test_legacy_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
            self.assertIsInstance(retriever, RagRetriever)
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
            )
            out = retriever.retrieve(hidden_states, n_docs=1)
            self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs, return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end_to_end_retrieval(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
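    # Small numpy sketch (same numbers as the dummy index above: doc 0 embeds to ones, doc 1
    # to 2 * ones) of why the assertions expect doc "1" first for the all-ones query and
    # doc "0" first for its negation - retrieval here is just a max inner product.
    def test_inner_product_ordering_sketch(self):
        emb = np.stack([np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)])
        query = np.ones(self.retrieval_vector_size)
        assert int(np.argmax(emb @ query)) == 1
        assert int(np.argmax(emb @ -query)) == 0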
| 53 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
    AudioDiffusionPipeline,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return model

    @property
    def dummy_unet_condition(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10,
        )
        return model

    @property
    def dummy_vqvae_and_unet(self):
        torch.manual_seed(0)
        vqvae = AutoencoderKL(
            sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
        )
        unet = UNet2DModel(
            sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"),
        )
        return vqvae, unet
@slow
    def test_audio_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        mel = Mel(
            x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0],
        )
        scheduler = DDPMScheduler()
        pipe = AudioDiffusionPipeline(vqvae=None, unet=self.dummy_unet, mel=mel, scheduler=scheduler)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4)
        audio = output.audios[0]
        image = output.images[0]

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator, steps=4, return_dict=False)
        image_from_tuple = output[0][0]

        assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
        assert (
            image.height == self.dummy_unet.config.sample_size[0]
            and image.width == self.dummy_unet.config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        image_from_tuple_slice = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() == 0

        mel = Mel(
            x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0],
        )
        scheduler = DDIMScheduler()
        dummy_vqvae_and_unet = self.dummy_vqvae_and_unet
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        np.random.seed(0)
        raw_audio = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,))
        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(raw_audio=raw_audio, generator=generator, start_step=5, steps=10)
        image = output.images[0]

        assert (
            image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
            and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
        )
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0

        dummy_unet_condition = self.dummy_unet_condition
        pipe = AudioDiffusionPipeline(
            vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_unet_condition, mel=mel, scheduler=scheduler
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        np.random.seed(0)
        encoding = torch.rand((1, 1, 10))
        output = pipe(generator=generator, encoding=encoding)
        image = output.images[0]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_audio_diffusion(self):
        device = torch_device
        pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(42)
        output = pipe(generator=generator)
        audio = output.audios[0]
        image = output.images[0]

        assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
        assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
        image_slice = np.frombuffer(image.tobytes(), dtype="uint8")[:10]
        expected_slice = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26])
        assert np.abs(image_slice.flatten() - expected_slice).max() == 0
| 51 |
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parses the arguments for the TPU launcher."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
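# Example invocation (paths and script name are illustrative): this launcher mirrors
# torch.distributed.launch for TPUs - it imports the training script as a module and spawns
# its `_mp_fn(index)` once per core, so the target script must define `_mp_fn`.
#
#   python xla_spawn.py --num_cores 8 examples/run_glue.py --model_name_or_path bert-base-cased ...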
| 47 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(self, image_size=224, num_channels=3, kernel_size=3, stride=2, padding=1, patch_size=16, hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, mlp_ratio=[2, 2, 2], attention_ratio=[2, 2, 2], initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
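# Small sanity sketch of the derived down_ops above: the two "Subsample" stages get their
# attention-head counts from hidden_sizes[i] // key_dim[0], so the defaults give 8 and 16.
if __name__ == "__main__":
    config = LevitConfig()
    assert config.down_ops[0][:3] == ["Subsample", 16, 128 // 16]
    assert config.down_ops[1][:3] == ["Subsample", 16, 256 // 16]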
| 354 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Creates a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant schedule, e.g. "1:10,0.1:20,0.01" -> multiplier 1 below step 10, 0.1 below step 20, then 0.01."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Cosine schedule with hard restarts, after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Polynomial decay from the initial lr to lr_end, after a linear warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified API to get any scheduler from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
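# Worked numbers for the linear schedule above (these are LR multipliers, applied on top of
# the optimizer's base LR). The helper re-implements lr_lambda from
# get_linear_schedule_with_warmup as plain Python, with assumed step counts, for a quick check.
def _linear_lambda(step, warmup=10, total=100):
    if step < warmup:
        return step / max(1, warmup)
    return max(0.0, (total - step) / max(1, total - warmup))


assert _linear_lambda(5) == 0.5    # halfway through warmup
assert _linear_lambda(10) == 1.0   # warmup done, full LR
assert _linear_lambda(55) == 0.5   # halfway through the decay
assert _linear_lambda(100) == 0.0  # decayed to zero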
| 198 | 0 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Returns the smallest partition count at which the proportion of perfect
    partitions drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(F'''{solution() = }''')
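# Numeric check of check_partition_perfect above: the log is an integer exactly when
# 4k + 1 = (2**(m + 1) - 1)**2, i.e. when k = 2**m * (2**m - 1), since then
# sqrt(4k + 1) / 2 + 1 / 2 collapses to 2**m.
if __name__ == "__main__":
    assert all(check_partition_perfect(2**m * (2**m - 1)) for m in range(1, 6))
    assert not check_partition_perfect(3)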
| 201 | from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    """simple docstring"""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target, add_special_tokens=False, return_attention_mask=False, return_token_type_ids=False, max_length=1, truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
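# Hedged usage sketch of this pipeline via the public factory (the model id is an assumption
# for illustration; `pipeline("fill-mask", ...)` is the standard entry point). `targets`
# restricts scoring to the given candidate tokens, as implemented in get_target_ids above.
#
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="distilroberta-base")  # assumed checkpoint
# unmasker("The capital of France is <mask>.", targets=[" Paris", " London"], top_k=2)
# # -> list of dicts with "score", "token", "token_str" and the filled "sequence"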
| 338 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01


@dataclasses.dataclass(frozen=True)
class Protein:
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype)), b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents, parents_chain_index) if i == chain_id]
    if parents is None or len(parents) == 0:
        parents = ["N/A"]
    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def A (__A : Protein ) -> str:
"""simple docstring"""
UpperCAmelCase_ = residue_constants.restypes + ['''X''']
def res_atoa(r : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' )
UpperCAmelCase_ = residue_constants.atom_types
UpperCAmelCase_ = []
UpperCAmelCase_ = prot.atom_mask
UpperCAmelCase_ = prot.aatype
UpperCAmelCase_ = prot.atom_positions
UpperCAmelCase_ = prot.residue_index.astype(np.intaa )
UpperCAmelCase_ = prot.b_factors
UpperCAmelCase_ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('''Invalid aatypes.''' )
UpperCAmelCase_ = get_pdb_headers(__A )
if len(__A ) > 0:
pdb_lines.extend(__A )
UpperCAmelCase_ = aatype.shape[0]
UpperCAmelCase_ = 1
UpperCAmelCase_ = 0
UpperCAmelCase_ = string.ascii_uppercase
UpperCAmelCase_ = None
# Add all atom sites.
for i in range(__A ):
UpperCAmelCase_ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__A , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
UpperCAmelCase_ = '''ATOM'''
UpperCAmelCase_ = atom_name if len(__A ) == 4 else F""" {atom_name}"""
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = 1.00
UpperCAmelCase_ = atom_name[0] # Protein supports only C, N, O, S, this works.
UpperCAmelCase_ = ''''''
UpperCAmelCase_ = '''A'''
if chain_index is not None:
UpperCAmelCase_ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
UpperCAmelCase_ = (
F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"""
F"""{res_name_a:>3} {chain_tag:>1}"""
F"""{residue_index[i]:>4}{insertion_code:>1} """
F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"""
F"""{occupancy:>6.2f}{b_factor:>6.2f} """
F"""{element:>2}{charge:>2}"""
)
pdb_lines.append(__A )
atom_index += 1
UpperCAmelCase_ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
UpperCAmelCase_ = True
UpperCAmelCase_ = chain_index[i + 1]
if should_terminate:
# Close the chain.
UpperCAmelCase_ = '''TER'''
UpperCAmelCase_ = (
F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}"""
)
pdb_lines.append(__A )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__A , __A ) )
pdb_lines.append('''END''' )
pdb_lines.append('''''' )
return "\n".join(__A )
def A (__A : Protein ) -> np.ndarray:
"""simple docstring"""
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def A (features : FeatureDict , result : ModelOutput , b_factors : Optional[np.ndarray] = None , chain_index : Optional[np.ndarray] = None , remark : Optional[str] = None , parents : Optional[Sequence[str]] = None , parents_chain_index : Optional[Sequence[int]] = None , ) -> Protein:
"""simple docstring"""
return Protein(
aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
| 7 |
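The snippet above writes atoms in the PDB format's fixed-width columns, where every literal space in the format string is load-bearing. As a minimal, self-contained sketch (not the routine above; all sample values are hypothetical), one ATOM record can be built like this:
# Minimal sketch of one fixed-width PDB ATOM record. Column widths mirror
# the format string in the snippet above; the values below are made up.
def format_atom_line(atom_index, name, res_name, chain, res_index, xyz, occupancy=1.00, b_factor=0.00, element="C"):
    x, y, z = xyz
    return (
        f"{'ATOM':<6}{atom_index:>5} {name:<4}{'':>1}"   # record type, serial, atom name, altloc
        f"{res_name:>3} {chain:>1}"                      # residue name, chain id
        f"{res_index:>4}{'':>1}   "                      # residue number, insertion code
        f"{x:>8.3f}{y:>8.3f}{z:>8.3f}"                   # coordinates
        f"{occupancy:>6.2f}{b_factor:>6.2f}          "   # occupancy, B-factor
        f"{element:>2}{'':>2}"                           # element, charge
    )
print(format_atom_line(1, " CA ", "ALA", "A", 1, (11.104, 6.134, -6.504)))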
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __snake_case :
@staticmethod
def lowerCamelCase ( *_snake_case : List[str] , **_snake_case : str):
"""simple docstring"""
pass
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : List[Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowerCamelCase ( self : Any , _snake_case : Optional[Any] , _snake_case : int , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = [
{
'''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
'''question''': '''How many cats are there?''',
},
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''question''': '''How many cats are there?''',
},
]
return vqa_pipeline, examples
def lowerCamelCase ( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[int]):
"""simple docstring"""
UpperCAmelCase_ = vqa_pipeline(_snake_case , top_k=1)
self.assertEqual(
_snake_case , [
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
[{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}],
] , )
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question='''How many cats are there?''' , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
_snake_case , [{'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}, {'''score''': ANY(_snake_case), '''answer''': ANY(_snake_case)}])
@slow
@require_torch
def lowerCamelCase ( self : int):
"""simple docstring"""
UpperCAmelCase_ = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''')
UpperCAmelCase_ = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
UpperCAmelCase_ = '''How many cats are there?'''
UpperCAmelCase_ = vqa_pipeline(image=_snake_case , question=_snake_case , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}])
UpperCAmelCase_ = vqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2)
self.assertEqual(
nested_simplify(_snake_case , decimals=4) , [[{'''score''': 0.8_7_9_9, '''answer''': '''2'''}, {'''score''': 0.2_9_6, '''answer''': '''1'''}]] * 2 , )
@require_tf
@unittest.skip('''Visual question answering not implemented in TF''')
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
pass
| 7 | 1 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> str:
'''simple docstring'''
if not all(char in '''01''' for char in bin_string ):
raise ValueError('''Non-binary value was passed to the function''' )
if not bin_string:
raise ValueError('''Empty string was passed to the function''' )
snake_case_ = ''''''
while len(__UpperCAmelCase ) % 3 != 0:
snake_case_ = '''0''' + bin_string
snake_case_ = [
bin_string[index : index + 3]
for index in range(len(__UpperCAmelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
snake_case_ = 0
for index, val in enumerate(__UpperCAmelCase ):
oct_val += int(2 ** (2 - index) * int(__UpperCAmelCase ) )
oct_string += str(__UpperCAmelCase )
return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
| 56 |
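For reference, here is a deobfuscated, runnable sketch of the same binary-to-octal conversion (the name bin_to_octal is mine): left-pad the input to a multiple of three bits, then map each 3-bit group to one octal digit.
def bin_to_octal(bin_string: str) -> str:
    # Validate: non-empty, binary digits only.
    if not bin_string or any(c not in "01" for c in bin_string):
        raise ValueError("Expected a non-empty binary string")
    # Left-pad to a multiple of 3 bits, then read each 3-bit group as one octal digit.
    bin_string = bin_string.zfill((len(bin_string) + 2) // 3 * 3)
    return "".join(str(int(bin_string[i:i + 3], 2)) for i in range(0, len(bin_string), 3))
assert bin_to_octal("1111") == "17"     # 0b1111 == 0o17
assert bin_to_octal("101010") == "52"   # 42 decimal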
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
a : Dict = logging.get_logger(__name__)
a : List[str] = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class a ( _lowerCamelCase ):
snake_case_ = "marian"
snake_case_ = ["past_key_values"]
snake_case_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[Any] , lowercase_ : Optional[Any]=5_8101 , lowercase_ : Dict=None , lowercase_ : List[str]=1024 , lowercase_ : Optional[Any]=12 , lowercase_ : int=4096 , lowercase_ : Any=16 , lowercase_ : Optional[int]=12 , lowercase_ : str=4096 , lowercase_ : Union[str, Any]=16 , lowercase_ : Dict=0.0 , lowercase_ : Union[str, Any]=0.0 , lowercase_ : Optional[Any]=True , lowercase_ : Union[str, Any]=True , lowercase_ : int="gelu" , lowercase_ : Dict=1024 , lowercase_ : int=0.1 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=0.0 , lowercase_ : Tuple=0.02 , lowercase_ : int=5_8100 , lowercase_ : Optional[Any]=False , lowercase_ : Any=5_8100 , lowercase_ : Optional[int]=0 , lowercase_ : Dict=0 , lowercase_ : List[str]=True , **lowercase_ : Any , ):
snake_case_ = vocab_size
snake_case_ = decoder_vocab_size or vocab_size
snake_case_ = max_position_embeddings
snake_case_ = d_model
snake_case_ = encoder_ffn_dim
snake_case_ = encoder_layers
snake_case_ = encoder_attention_heads
snake_case_ = decoder_ffn_dim
snake_case_ = decoder_layers
snake_case_ = decoder_attention_heads
snake_case_ = dropout
snake_case_ = attention_dropout
snake_case_ = activation_dropout
snake_case_ = activation_function
snake_case_ = init_std
snake_case_ = encoder_layerdrop
snake_case_ = decoder_layerdrop
snake_case_ = use_cache
snake_case_ = encoder_layers
snake_case_ = scale_embedding # scale factor will be sqrt(d_model) if True
snake_case_ = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , forced_eos_token_id=lowercase_ , **lowercase_ , )
class a ( _lowerCamelCase ):
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def A_ ( self : Union[str, Any] ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
snake_case_ = {0: '''batch'''}
snake_case_ = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
snake_case_ = {0: '''batch''', 1: '''decoder_sequence'''}
snake_case_ = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(lowercase_ , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
snake_case_ ,snake_case_ = self.num_layers
for i in range(lowercase_ ):
snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
else:
snake_case_ = OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def A_ ( self : Dict ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = super().outputs
else:
snake_case_ = super(lowercase_ , self ).outputs
if self.use_past:
snake_case_ ,snake_case_ = self.num_layers
for i in range(lowercase_ ):
snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
snake_case_ = {0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def A_ ( self : Dict , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Generate decoder inputs
snake_case_ = seq_length if not self.use_past else 1
snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
snake_case_ = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
snake_case_ = dict(**lowercase_ , **lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case_ ,snake_case_ = common_inputs['''input_ids'''].shape
snake_case_ = common_inputs['''decoder_input_ids'''].shape[1]
snake_case_ ,snake_case_ = self.num_attention_heads
snake_case_ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ = decoder_seq_length + 3
snake_case_ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case_ = torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(lowercase_ , lowercase_ )] , dim=1 )
snake_case_ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case_ ,snake_case_ = self.num_layers
snake_case_ = min(lowercase_ , lowercase_ )
snake_case_ = max(lowercase_ , lowercase_ ) - min_num_layers
snake_case_ = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(lowercase_ ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
torch.zeros(lowercase_ ),
) )
# TODO: test this.
snake_case_ = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(lowercase_ , lowercase_ ):
common_inputs["past_key_values"].append((torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) )
return common_inputs
def A_ ( self : Union[str, Any] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
snake_case_ = self._generate_dummy_inputs_for_encoder_and_decoder(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
snake_case_ ,snake_case_ = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
snake_case_ = seqlen + 2
snake_case_ ,snake_case_ = self.num_layers
snake_case_ ,snake_case_ = self.num_attention_heads
snake_case_ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case_ = common_inputs['''attention_mask'''].dtype
snake_case_ = torch.cat(
[common_inputs['''attention_mask'''], torch.ones(lowercase_ , lowercase_ , dtype=lowercase_ )] , dim=1 )
snake_case_ = [
(torch.zeros(lowercase_ ), torch.zeros(lowercase_ )) for _ in range(lowercase_ )
]
return common_inputs
def A_ ( self : List[str] , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case_ = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case_ = tokenizer.num_special_tokens_to_add(lowercase_ )
snake_case_ = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
snake_case_ = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case_ = dict(tokenizer(lowercase_ , return_tensors=lowercase_ ) )
return common_inputs
def A_ ( self : Any , lowercase_ : PreTrainedTokenizer , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
else:
snake_case_ = self._generate_dummy_inputs_for_causal_lm(
lowercase_ , batch_size=lowercase_ , seq_length=lowercase_ , is_pair=lowercase_ , framework=lowercase_ )
return common_inputs
def A_ ( self : Dict , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : List[str] ):
if self.task in ["default", "seq2seq-lm"]:
snake_case_ = super()._flatten_past_key_values_(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
else:
snake_case_ = super(lowercase_ , self )._flatten_past_key_values_(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
@property
def A_ ( self : List[str] ):
return 1e-4
| 56 | 1 |
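The dummy-input generation above allocates one zero-filled (key, value) pair per layer, shaped (batch, num_heads, past_length, hidden_size // num_heads). A small sketch of that shape arithmetic with hypothetical sizes (the real ones come from the model config):
import torch
batch, num_heads, hidden_size, num_layers = 2, 16, 1024, 6   # hypothetical sizes
decoder_seq_length = 8
past_length = decoder_seq_length + 3          # mirrors the "+ 3" in the snippet above
shape = (batch, num_heads, past_length, hidden_size // num_heads)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]
print(past_key_values[0][0].shape)            # torch.Size([2, 16, 11, 64])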
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
_A = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # "J'aime le camembert !"
_A = model(__UpperCAmelCase )["last_hidden_state"]
_A = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
# compare the actual values for a slice.
_A = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 350 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 174 | 0 |
"""simple docstring"""
def A_ ( numerator = 3, denominator = 7, limit = 1000000 ):
'''simple docstring'''
snake_case_ :List[Any] = 0
snake_case_ :Any = 1
for current_denominator in range(1, limit + 1 ):
snake_case_ :int = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
snake_case_ :List[str] = current_numerator
snake_case_ :Any = current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 66 |
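The solver above is the classic "ordered fractions" search: among fractions n/d strictly below numerator/denominator with d up to the limit, find the numerator of the largest one. A deobfuscated sketch (assuming numerator/denominator is in lowest terms), using an exact cross-multiplied comparison instead of floats:
def max_numerator_below(num: int = 3, den: int = 7, limit: int = 1_000_000) -> int:
    best_n, best_d = 0, 1
    for d in range(1, limit + 1):
        n = num * d // den        # largest n with n/d <= num/den
        if d % den == 0:          # exact hit; step below to keep the inequality strict
            n -= 1
        if n * best_d > best_n * d:   # n/d > best_n/best_d, without floats
            best_n, best_d = n, d
    return best_n
assert max_numerator_below(3, 7, 8) == 2              # 2/5 is the best fraction below 3/7 with d <= 8
assert max_numerator_below(3, 7, 1_000_000) == 428570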
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = (DDPMParallelScheduler,)
def SCREAMING_SNAKE_CASE_ (self : Any , **UpperCAmelCase_ : Any) ->Any:
'''simple docstring'''
lowerCamelCase__: Any ={
"num_train_timesteps": 1_000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**UpperCAmelCase_)
return config
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Optional[int]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Optional[Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
self.check_over_configs(thresholding=UpperCAmelCase_)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCAmelCase_ , prediction_type=UpperCAmelCase_ , sample_max_value=UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Any) ->Optional[int]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->int:
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0979)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1E-5
def SCREAMING_SNAKE_CASE_ (self : Any) ->str:
'''simple docstring'''
lowerCamelCase__: int =self.scheduler_classes[0]
lowerCamelCase__: Tuple =self.get_scheduler_config()
lowerCamelCase__: Tuple =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =self.dummy_model()
lowerCamelCase__: int =self.dummy_sample_deter
lowerCamelCase__: Union[str, Any] =self.dummy_sample_deter + 0.1
lowerCamelCase__: Optional[Any] =self.dummy_sample_deter - 0.1
lowerCamelCase__: Optional[Any] =samplea.shape[0]
lowerCamelCase__: List[Any] =torch.stack([samplea, samplea, samplea] , dim=0)
lowerCamelCase__: Union[str, Any] =torch.arange(UpperCAmelCase_)[0:3, None].repeat(1 , UpperCAmelCase_)
lowerCamelCase__: Optional[int] =model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
lowerCamelCase__: Tuple =scheduler.batch_step_no_noise(UpperCAmelCase_ , timesteps.flatten(0 , 1) , samples.flatten(0 , 1))
lowerCamelCase__: List[str] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Any =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 1153.1833) < 1E-2
assert abs(result_mean.item() - 0.5005) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Any =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[int] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =len(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =self.dummy_model()
lowerCamelCase__: List[Any] =self.dummy_sample_deter
lowerCamelCase__: int =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Tuple =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Optional[Any] =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: Any =pred_prev_sample
lowerCamelCase__: Any =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: List[str] =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 258.9606) < 1E-2
assert abs(result_mean.item() - 0.3372) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : int) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config(prediction_type="v_prediction")
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: str =len(UpperCAmelCase_)
lowerCamelCase__: str =self.dummy_model()
lowerCamelCase__: str =self.dummy_sample_deter
lowerCamelCase__: Dict =torch.manual_seed(0)
for t in reversed(range(UpperCAmelCase_)):
# 1. predict noise residual
lowerCamelCase__: Union[str, Any] =model(UpperCAmelCase_ , UpperCAmelCase_)
# 2. predict previous mean of sample x_t-1
lowerCamelCase__: Dict =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_).prev_sample
lowerCamelCase__: List[str] =pred_prev_sample
lowerCamelCase__: List[Any] =torch.sum(torch.abs(UpperCAmelCase_))
lowerCamelCase__: Tuple =torch.mean(torch.abs(UpperCAmelCase_))
assert abs(result_sum.item() - 202.0296) < 1E-2
assert abs(result_mean.item() - 0.2631) < 1E-3
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =self.scheduler_classes[0]
lowerCamelCase__: Union[str, Any] =self.get_scheduler_config()
lowerCamelCase__: Any =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: List[Any] =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =scheduler.timesteps
for i, timestep in enumerate(UpperCAmelCase_):
if i == len(UpperCAmelCase_) - 1:
lowerCamelCase__: Dict =-1
else:
lowerCamelCase__: Union[str, Any] =timesteps[i + 1]
lowerCamelCase__: Tuple =scheduler.previous_timestep(UpperCAmelCase_)
lowerCamelCase__: str =prev_t.item()
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: List[Any] =self.get_scheduler_config()
lowerCamelCase__: Dict =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =[100, 87, 50, 51, 0]
with self.assertRaises(UpperCAmelCase_ , msg="`custom_timesteps` must be in descending order."):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =self.scheduler_classes[0]
lowerCamelCase__: Any =self.get_scheduler_config()
lowerCamelCase__: int =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Optional[int] =[100, 87, 50, 1, 0]
lowerCamelCase__: int =len(UpperCAmelCase_)
with self.assertRaises(UpperCAmelCase_ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
scheduler.set_timesteps(num_inference_steps=UpperCAmelCase_ , timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =self.scheduler_classes[0]
lowerCamelCase__: Optional[Any] =self.get_scheduler_config()
lowerCamelCase__: Optional[Any] =scheduler_class(**UpperCAmelCase_)
lowerCamelCase__: Dict =[scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCAmelCase_ , msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}" , ):
scheduler.set_timesteps(timesteps=UpperCAmelCase_)
| 10 | 0 |
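The custom-timestep tests above pin down a simple rule: with a user-supplied descending schedule, each step's "previous" timestep is just the next list entry, and -1 once the schedule is exhausted. A standalone sketch of that lookup:
def previous_timestep(timesteps, i):
    # Next entry in the descending schedule, or -1 after the last step.
    return timesteps[i + 1] if i < len(timesteps) - 1 else -1
schedule = [100, 87, 50, 1, 0]
assert [previous_timestep(schedule, i) for i in range(len(schedule))] == [87, 50, 1, 0, -1]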
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowercase_ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowercase_ = {"""facebook/blenderbot_small-90M""": 512}
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = set()
lowercase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase__ = char
lowercase__ = set(SCREAMING_SNAKE_CASE_ )
return pairs
class _snake_case ( lowercase__):
UpperCamelCase__ : List[Any] =VOCAB_FILES_NAMES
UpperCamelCase__ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : List[str] =["""input_ids""", """attention_mask"""]
def __init__( self : int, __lowercase : List[Any], __lowercase : Any, __lowercase : Any="__start__", __lowercase : Any="__end__", __lowercase : List[str]="__unk__", __lowercase : List[Any]="__null__", **__lowercase : List[str], ):
super().__init__(unk_token=__lowercase, bos_token=__lowercase, eos_token=__lowercase, pad_token=__lowercase, **__lowercase )
with open(__lowercase, encoding="utf-8" ) as vocab_handle:
lowercase__ = json.load(__lowercase )
lowercase__ = {v: k for k, v in self.encoder.items()}
with open(__lowercase, encoding="utf-8" ) as merges_handle:
lowercase__ = merges_handle.read().split("\n" )[1:-1]
lowercase__ = [tuple(merge.split() ) for merge in merges]
lowercase__ = dict(zip(__lowercase, range(len(__lowercase ) ) ) )
lowercase__ = {}
@property
def A__ ( self : Tuple ):
return len(self.encoder )
def A__ ( self : List[Any] ):
return dict(self.encoder, **self.added_tokens_encoder )
def A__ ( self : Optional[int], __lowercase : str ):
if token in self.cache:
return self.cache[token]
lowercase__ = re.sub("([.,!?()])", R" \1", __lowercase )
lowercase__ = re.sub("(')", R" \1 ", __lowercase )
lowercase__ = re.sub(R"\s{2,}", " ", __lowercase )
if "\n" in token:
lowercase__ = token.replace("\n", " __newln__" )
lowercase__ = token.split(" " )
lowercase__ = []
for token in tokens:
if not len(__lowercase ):
continue
lowercase__ = token.lower()
lowercase__ = tuple(__lowercase )
lowercase__ = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
lowercase__ = get_pairs(__lowercase )
if not pairs:
words.append(__lowercase )
continue
while True:
lowercase__ = min(__lowercase, key=lambda __lowercase : self.bpe_ranks.get(__lowercase, float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase__ , lowercase__ = bigram
lowercase__ = []
lowercase__ = 0
while i < len(__lowercase ):
try:
lowercase__ = word.index(__lowercase, __lowercase )
new_word.extend(word[i:j] )
lowercase__ = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__lowercase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase__ = tuple(__lowercase )
lowercase__ = new_word
if len(__lowercase ) == 1:
break
else:
lowercase__ = get_pairs(__lowercase )
lowercase__ = "@@ ".join(__lowercase )
lowercase__ = word[:-4]
lowercase__ = word
words.append(__lowercase )
return " ".join(__lowercase )
def A__ ( self : Tuple, __lowercase : str ):
lowercase__ = []
lowercase__ = re.findall(R"\S+\n?", __lowercase )
for token in words:
split_tokens.extend(list(self.bpe(__lowercase ).split(" " ) ) )
return split_tokens
def A__ ( self : str, __lowercase : str ):
lowercase__ = token.lower()
return self.encoder.get(__lowercase, self.encoder.get(self.unk_token ) )
def A__ ( self : Tuple, __lowercase : int ):
return self.decoder.get(__lowercase, self.unk_token )
def A__ ( self : Any, __lowercase : List[str] ):
lowercase__ = " ".join(__lowercase ).replace("@@ ", "" ).strip()
return out_string
def A__ ( self : List[str], __lowercase : str, __lowercase : Optional[str] = None ):
if not os.path.isdir(__lowercase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase__ = os.path.join(
__lowercase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
lowercase__ = os.path.join(
__lowercase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__lowercase, "w", encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=__lowercase, ensure_ascii=__lowercase ) + "\n" )
lowercase__ = 0
with open(__lowercase, "w", encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
lowercase__ = token_index
writer.write(" ".join(__lowercase ) + "\n" )
index += 1
return vocab_file, merge_file
| 224 |
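The tokenizer above is standard byte-pair encoding: collect adjacent symbol pairs, then repeatedly merge the pair with the lowest merge rank until no ranked pair remains. A self-contained sketch with a toy (hypothetical) merge table:
def get_pairs(word):
    return {(a, b) for a, b in zip(word, word[1:])}
def bpe(word, ranks):
    word = tuple(word)
    while len(word) > 1:
        best = min(get_pairs(word), key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:   # no mergeable pair left
            break
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == (first, second):
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return word
# Toy ranks: merge ("l", "o") first, then ("lo", "w").
print(bpe("low", {("l", "o"): 0, ("lo", "w"): 1}))  # ('low',)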
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
lowercase_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786,
1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791,
1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409,
3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361
]
lowercase_ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793,
1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675,
2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865,
4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362
]
class _snake_case ( lowercase__):
UpperCamelCase__ : Optional[int] ="""whisper"""
UpperCamelCase__ : Optional[int] =["""past_key_values"""]
UpperCamelCase__ : Optional[Any] ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[Any], __lowercase : List[str]=5_1865, __lowercase : Dict=80, __lowercase : List[Any]=6, __lowercase : Union[str, Any]=4, __lowercase : Tuple=6, __lowercase : Dict=4, __lowercase : List[Any]=1536, __lowercase : Tuple=1536, __lowercase : Any=0.0, __lowercase : List[str]=0.0, __lowercase : List[Any]=5_0257, __lowercase : List[str]=True, __lowercase : str=True, __lowercase : int="gelu", __lowercase : Tuple=256, __lowercase : Tuple=0.0, __lowercase : List[Any]=0.0, __lowercase : Optional[int]=0.0, __lowercase : List[Any]=0.02, __lowercase : Union[str, Any]=False, __lowercase : str=1500, __lowercase : Optional[int]=448, __lowercase : Optional[Any]=5_0256, __lowercase : Tuple=5_0256, __lowercase : Any=5_0256, __lowercase : Union[str, Any]=None, __lowercase : Any=[220, 5_0256], __lowercase : List[Any]=False, __lowercase : int=256, __lowercase : int=False, __lowercase : Tuple=0.05, __lowercase : int=10, __lowercase : Dict=2, __lowercase : List[Any]=0.0, __lowercase : Optional[int]=10, __lowercase : Union[str, Any]=0, __lowercase : Tuple=7, **__lowercase : Union[str, Any], ):
lowercase__ = vocab_size
lowercase__ = num_mel_bins
lowercase__ = d_model
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = encoder_ffn_dim
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = use_cache
lowercase__ = encoder_layers
lowercase__ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase__ = max_source_positions
lowercase__ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
lowercase__ = classifier_proj_size
lowercase__ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = apply_spec_augment
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
lowercase__ = mask_feature_min_masks
lowercase__ = median_filter_width
super().__init__(
pad_token_id=__lowercase, bos_token_id=__lowercase, eos_token_id=__lowercase, is_encoder_decoder=__lowercase, decoder_start_token_id=__lowercase, suppress_tokens=__lowercase, begin_suppress_tokens=__lowercase, **__lowercase, )
class _snake_case ( lowercase__):
@property
def A__ ( self : str ):
lowercase__ = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
lowercase__ = {0: "batch"}
else:
lowercase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__lowercase, direction="inputs" )
return common_inputs
def A__ ( self : int, __lowercase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], __lowercase : int = -1, __lowercase : int = -1, __lowercase : bool = False, __lowercase : Optional["TensorType"] = None, __lowercase : int = 2_2050, __lowercase : float = 5.0, __lowercase : int = 220, ):
lowercase__ = OrderedDict()
lowercase__ = OnnxConfig.generate_dummy_inputs(
self, preprocessor=preprocessor.feature_extractor, batch_size=__lowercase, framework=__lowercase, sampling_rate=__lowercase, time_duration=__lowercase, frequency=__lowercase, )
lowercase__ = encoder_inputs["input_features"].shape[2]
lowercase__ = encoder_sequence_length // 2 if self.use_past else seq_length
lowercase__ = super().generate_dummy_inputs(
preprocessor.tokenizer, __lowercase, __lowercase, __lowercase, __lowercase )
lowercase__ = encoder_inputs.pop("input_features" )
lowercase__ = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
lowercase__ = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def A__ ( self : int ):
return 1e-3
| 224 | 1 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
_snake_case : Optional[int] = False
_snake_case : Union[str, Any] = False
def a_ ( lowerCAmelCase_ : Namespace ):
return TrainCommand(lowerCAmelCase_ )
class _UpperCAmelCase ( lowercase__ ):
"""simple docstring"""
@staticmethod
def lowercase ( lowerCAmelCase_ : ArgumentParser ) -> Optional[int]:
__lowerCAmelCase = parser.add_parser('train' , help='CLI tool to train a model on a task.' )
train_parser.add_argument(
'--train_data' , type=_a , required=_a , help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.' , )
train_parser.add_argument(
'--column_label' , type=_a , default=0 , help='Column of the dataset csv file with example labels.' )
train_parser.add_argument(
'--column_text' , type=_a , default=1 , help='Column of the dataset csv file with example texts.' )
train_parser.add_argument(
'--column_id' , type=_a , default=2 , help='Column of the dataset csv file with example ids.' )
train_parser.add_argument(
'--skip_first_row' , action='store_true' , help='Skip the first row of the csv file (headers).' )
train_parser.add_argument('--validation_data' , type=_a , default='' , help='path to validation dataset.' )
train_parser.add_argument(
'--validation_split' , type=_a , default=0.1 , help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.' , )
train_parser.add_argument('--output' , type=_a , default='./' , help='path to saved the trained model.' )
train_parser.add_argument(
'--task' , type=_a , default='text_classification' , help='Task to train the model on.' )
train_parser.add_argument(
'--model' , type=_a , default='bert-base-uncased' , help='Model\'s name or path to stored model.' )
train_parser.add_argument('--train_batch_size' , type=_a , default=3_2 , help='Batch size for training.' )
train_parser.add_argument('--valid_batch_size' , type=_a , default=6_4 , help='Batch size for validation.' )
train_parser.add_argument('--learning_rate' , type=_a , default=3e-5 , help='Learning rate.' )
train_parser.add_argument('--adam_epsilon' , type=_a , default=1e-08 , help='Epsilon for Adam optimizer.' )
train_parser.set_defaults(func=_a )
def __init__( self : int , lowerCAmelCase_ : Namespace ) -> Union[str, Any]:
__lowerCAmelCase = logging.get_logger('transformers-cli/training' )
__lowerCAmelCase = 'tf' if is_tf_available() else 'torch'
os.makedirs(args.output , exist_ok=_a )
__lowerCAmelCase = args.output
__lowerCAmelCase = args.column_label
__lowerCAmelCase = args.column_text
__lowerCAmelCase = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
__lowerCAmelCase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
__lowerCAmelCase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__lowerCAmelCase = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
__lowerCAmelCase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__lowerCAmelCase = args.validation_split
__lowerCAmelCase = args.train_batch_size
__lowerCAmelCase = args.valid_batch_size
__lowerCAmelCase = args.learning_rate
__lowerCAmelCase = args.adam_epsilon
def lowercase ( self : Any ) -> Tuple:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def lowercase ( self : Tuple ) -> Any:
raise NotImplementedError
def lowercase ( self : List[str] ) -> Tuple:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
| 284 |
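The command above follows the transformers CLI pattern: register_subcommand wires a subparser and stashes a factory via set_defaults(func=...), and the dispatcher later calls args.func(args).run(). A minimal, framework-free sketch of the same pattern (all names here are my own):
from argparse import ArgumentParser, Namespace
class TrainCommand:
    @staticmethod
    def register_subcommand(subparsers):
        sub = subparsers.add_parser("train", help="Train a model.")
        sub.add_argument("--train_data", type=str, required=True)
        sub.set_defaults(func=lambda args: TrainCommand(args))  # factory used by the dispatcher
    def __init__(self, args: Namespace):
        self.train_data = args.train_data
    def run(self):
        print(f"training on {self.train_data}")
parser = ArgumentParser("cli")
TrainCommand.register_subcommand(parser.add_subparsers())
args = parser.parse_args(["train", "--train_data", "data.csv"])
args.func(args).run()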
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : List[Any] = None
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def __lowercase ( self : str ):
'''simple docstring'''
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a ,'feature_size' ) )
self.assertTrue(hasattr(_a ,'sampling_rate' ) )
self.assertTrue(hasattr(_a ,'padding_value' ) )
def __lowercase ( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_tester.prepare_inputs_for_common()
_a : str = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(_a ) == len(_a ) for x, y in zip(_a ,processed_features[input_name] ) ) )
_a : Any = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Union[str, Any] = BatchFeature({input_name: speech_inputs} ,tensor_type='np' )
_a : Union[str, Any] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[int] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def __lowercase ( self : Any ):
'''simple docstring'''
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : int = feat_extract.model_input_names[0]
_a : str = BatchFeature({input_name: speech_inputs} ,tensor_type='pt' )
_a : str = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : str = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def __lowercase ( self : int ):
'''simple docstring'''
_a : int = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_a )
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = feat_extract.model_input_names[0]
_a : int = BatchFeature({input_name: speech_inputs} ,tensor_type='tf' )
_a : Optional[int] = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_a : Optional[Any] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def __lowercase ( self : Dict ,_a : Any=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : Tuple ):
_a : Tuple = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : Optional[Any] ,_a : Union[str, Any] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : int = self.feature_extraction_class(**self.feat_extract_dict )
_a : Tuple = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Tuple = BatchFeature({input_name: speech_inputs} )
_a : str = self.feat_extract_tester.seq_length_diff
_a : Dict = self.feat_extract_tester.max_seq_length + pad_diff
_a : Dict = self.feat_extract_tester.min_seq_length
_a : Optional[Any] = self.feat_extract_tester.batch_size
_a : Tuple = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_a : int = feat_extract.pad(_a ,padding=_a )
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(_a ,padding='longest' )
_a : Any = input_a[input_name]
_a : Optional[Any] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[-1] ) )
_a : List[str] = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
_a : str = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' )[input_name]
_a : int = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,return_tensors='np' )
_a : Optional[int] = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_a : Tuple = feat_extract.pad(_a ,pad_to_multiple_of=10 )
_a : List[str] = input_a[input_name]
_a : str = feat_extract.pad(_a ,padding='longest' ,pad_to_multiple_of=10 )
_a : Tuple = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a )
_a : Any = input_a[input_name]
_a : Optional[int] = feat_extract.pad(
_a ,padding='max_length' ,pad_to_multiple_of=10 ,max_length=_a ,return_tensors='np' ,)
_a : Dict = input_a[input_name]
self.assertTrue(all(len(_a ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
_a : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(_a ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] ,(batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_a : Any = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1E-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1E-3 )
def __lowercase ( self : List[Any] ,_a : Optional[int]=False ):
'''simple docstring'''
def _inputs_have_equal_length(_a : List[str] ):
_a : Union[str, Any] = len(input[0] )
for input_slice in input[1:]:
if len(_a ) != length:
return False
return True
def _inputs_are_equal(_a : List[str] ,_a : List[str] ):
if len(_a ) != len(_a ):
return False
for input_slice_a, input_slice_a in zip(_a ,_a ):
if not np.allclose(np.asarray(_a ) ,np.asarray(_a ) ,atol=1E-3 ):
return False
return True
_a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
_a : str = self.feat_extract_tester.prepare_inputs_for_common(numpify=_a )
_a : Any = feat_extract.model_input_names[0]
_a : List[Any] = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_a : Union[str, Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,truncation=_a )
_a : str = input_a[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) )
_a : Tuple = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to smallest with np
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' ,truncation=_a ,)
_a : Any = input_a[input_name]
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,return_tensors='np' )
_a : int = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
# truncate to middle
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a ,return_tensors='np' ,)
_a : List[Any] = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,truncation=_a )
_a : Tuple = input_a[input_name]
_a : Tuple = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[1] ) ,return_tensors='np' )
_a : Dict = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertTrue(_inputs_are_equal(_a ,_a ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(_a ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='longest' ,truncation=_a )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(_a ):
feat_extract.pad(_a ,padding='max_length' ,truncation=_a )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_a : Optional[Any] = 12
_a : List[Any] = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,truncation=_a ,)
_a : Tuple = input_a[input_name]
_a : str = feat_extract.pad(
_a ,padding='max_length' ,max_length=len(speech_inputs[0] ) ,pad_to_multiple_of=_a ,)
_a : List[Any] = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_a : List[Any] = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_a : Union[str, Any] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(_a ) )
self.assertFalse(_inputs_have_equal_length(_a ) )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Tuple ):
'''simple docstring'''
self._check_padding(numpify=_a )
def __lowercase ( self : Dict ):
'''simple docstring'''
self._check_truncation(numpify=_a )
def __lowercase ( self : str ):
'''simple docstring'''
self._check_truncation(numpify=_a )
@require_torch
def __lowercase ( self : Dict ):
'''simple docstring'''
_a : Any = self.feature_extraction_class(**self.feat_extract_dict )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Optional[int] = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : List[str] = feat_extract.pad(_a ,padding='longest' ,return_tensors='pt' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
@require_tf
    def test_padding_accepts_tensors_tf( self : int ):
'''simple docstring'''
_a : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
_a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : Dict = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )[input_name]
_a : Any = feat_extract.pad(_a ,padding='longest' ,return_tensors='tf' )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_tf.numpy().astype(np.float32 ).sum() ) < 1E-2 )
    def test_attention_mask( self : Union[str, Any] ):
'''simple docstring'''
_a : str = self.feat_extract_dict
_a : List[Any] = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : List[Any] = self.feat_extract_tester.prepare_inputs_for_common()
_a : Tuple = [len(_a ) for x in speech_inputs]
_a : int = feat_extract.model_input_names[0]
_a : Optional[Any] = BatchFeature({input_name: speech_inputs} )
_a : str = feat_extract.pad(_a ,padding='longest' ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(list(processed.attention_mask.shape ) ,list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() ,_a )
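        # padding positions get mask value 0 and real frames get 1, so summing the
        # mask over the last axis must recover each raw input's length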
    def test_attention_mask_with_truncation( self : int ):
'''simple docstring'''
_a : Any = self.feat_extract_dict
_a : Tuple = True
_a : Optional[int] = self.feature_extraction_class(**_a )
_a : Dict = self.feat_extract_tester.prepare_inputs_for_common()
_a : Dict = [len(_a ) for x in speech_inputs]
_a : Union[str, Any] = feat_extract.model_input_names[0]
_a : Any = BatchFeature({input_name: speech_inputs} )
_a : List[Any] = min(_a )
_a : Dict = feat_extract.pad(
_a ,padding='max_length' ,max_length=_a ,truncation=_a ,return_tensors='np' )
self.assertIn('attention_mask' ,_a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) ,[processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() ,[max_length for x in speech_inputs] )
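        # every example was truncated to the shortest input, so each attention mask
        # row is all ones and sums to exactly max_length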
| 271 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''image_processing_efficientformer'''] = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_efficientformer'''] = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_efficientformer'''] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
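    # e.g. `from transformers.models.efficientformer import EfficientFormerModel`
    # now resolves lazily, so torch/TF are only imported when the class is first used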
| 358 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
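# note: only decoder layer 0 keeps ca_qpos_proj (added just above); the other layers
# drop it, since conditional DETR applies the query position projection in
# cross-attention only in the first decoder layer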
def rename_key( state_dict, old, new ):
    '''simple docstring'''
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys( state_dict ):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''' )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v( state_dict, is_panoptic=False ):
    '''simple docstring'''
    prefix = ''''''
    if is_panoptic:
        prefix = '''conditional_detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:2_56, :]
        state_dict[F'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:2_56]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[2_56:5_12, :]
        state_dict[F'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[2_56:5_12]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-2_56:, :]
        state_dict[F'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-2_56:]
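        # the fused in_proj_weight stacks q, k and v along dim 0 (3 * 256 rows for
        # the 256-dim hidden size); the slices above split it back apart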
def prepare_img( ):
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint( model_name, pytorch_dump_folder_path ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_lowerCamelCase : Union[str, Any] = '''resnet101'''
if "dc5" in model_name:
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : Tuple = '''panoptic''' in model_name
if is_panoptic:
_lowerCamelCase : Optional[int] = 2_50
else:
_lowerCamelCase : int = 91
_lowerCamelCase : List[str] = '''huggingface/label-files'''
_lowerCamelCase : Any = '''coco-detection-id2label.json'''
_lowerCamelCase : Optional[int] = json.load(open(hf_hub_download(A_, A_, repo_type='''dataset''' ), '''r''' ) )
    _lowerCamelCase : List[str] = {int(k ): v for k, v in idalabel.items()}
_lowerCamelCase : List[str] = idalabel
_lowerCamelCase : str = {v: k for k, v in idalabel.items()}
# load image processor
_lowerCamelCase : int = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
_lowerCamelCase : Any = ConditionalDetrImageProcessor(format=A_ )
# prepare image
_lowerCamelCase : Optional[int] = prepare_img()
_lowerCamelCase : str = image_processor(images=A_, return_tensors='''pt''' )
_lowerCamelCase : Union[str, Any] = encoding['''pixel_values''']
logger.info(F'''Converting model {model_name}...''' )
# load original model from torch hub
_lowerCamelCase : int = torch.hub.load('''DeppMeng/ConditionalDETR''', A_, pretrained=A_ ).eval()
_lowerCamelCase : Tuple = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_lowerCamelCase : Optional[Any] = '''conditional_detr.''' + src
rename_key(A_, A_, A_ )
_lowerCamelCase : Dict = rename_backbone_keys(A_ )
# query, key and value matrices need special treatment
read_in_q_k_v(A_, is_panoptic=A_ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_lowerCamelCase : Optional[int] = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
_lowerCamelCase : List[Any] = state_dict.pop(A_ )
_lowerCamelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_lowerCamelCase : List[str] = state_dict.pop(A_ )
_lowerCamelCase : Optional[Any] = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
_lowerCamelCase : Optional[Any] = state_dict.pop(A_ )
_lowerCamelCase : Any = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
_lowerCamelCase : int = state_dict.pop(A_ )
_lowerCamelCase : str = val
# finally, create HuggingFace model and load state dict
_lowerCamelCase : Dict = ConditionalDetrForSegmentation(A_ ) if is_panoptic else ConditionalDetrForObjectDetection(A_ )
model.load_state_dict(A_ )
model.eval()
model.push_to_hub(repo_id=A_, organization='''DepuMeng''', commit_message='''Add model''' )
# verify our conversion
_lowerCamelCase : Dict = conditional_detr(A_ )
_lowerCamelCase : Optional[int] = model(A_ )
assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1E-4 )
assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1E-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1E-4 )
# Save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(A_ ).mkdir(exist_ok=A_ )
model.save_pretrained(A_ )
image_processor.save_pretrained(A_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 175 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
>>> pipe_prior.to("cuda")
>>> prompt = "red cat, 4k photo"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
>>> pipe.to("cuda")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save("cat.png")
```
'''
def get_new_h_w( h , w , scale_factor=8 ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ :Any = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
UpperCamelCase__ :Tuple = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
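# e.g. with the default scale_factor of 8 this rounds the size up to a multiple of
# 64 and then divides by 8: 768x768 pixels -> 96x96 latents, 500x500 -> 64x64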
class lowercase ( A__ ):
"""simple docstring"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ):
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
if latents is None:
UpperCamelCase__ :str = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
UpperCamelCase__ :Dict = latents.to(UpperCamelCase_ )
UpperCamelCase__ :Any = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = len(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else 1
# get prompt text embeddings
UpperCamelCase__ :List[str] = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , truncation=UpperCamelCase_ , max_length=77 , return_attention_mask=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase__ :str = text_inputs.input_ids
UpperCamelCase__ :List[str] = self.tokenizer(UpperCamelCase_ , padding='''longest''' , return_tensors='''pt''' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Tuple = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
UpperCamelCase__ :Union[str, Any] = text_input_ids.to(UpperCamelCase_ )
UpperCamelCase__ :Optional[int] = text_inputs.attention_mask.to(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.text_encoder(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
UpperCamelCase__ :Tuple = prompt_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
UpperCamelCase__ :List[Any] = text_encoder_hidden_states.repeat_interleave(UpperCamelCase_ , dim=0 )
UpperCamelCase__ :str = text_mask.repeat_interleave(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase__ :List[str]
if negative_prompt is None:
UpperCamelCase__ :Dict = [''''''] * batch_size
elif type(UpperCamelCase_ ) is not type(UpperCamelCase_ ):
raise TypeError(
                    F'''`negative_prompt` should be the same type as `prompt`, but got {type(UpperCamelCase_ )} !='''
F''' {type(UpperCamelCase_ )}.''' )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :List[str] = [negative_prompt]
elif batch_size != len(UpperCamelCase_ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase_ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
''' the batch size of `prompt`.''' )
else:
UpperCamelCase__ :Tuple = negative_prompt
UpperCamelCase__ :int = self.tokenizer(
UpperCamelCase_ , padding='''max_length''' , max_length=77 , truncation=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , return_tensors='''pt''' , )
UpperCamelCase__ :List[str] = uncond_input.input_ids.to(UpperCamelCase_ )
UpperCamelCase__ :Union[str, Any] = uncond_input.attention_mask.to(UpperCamelCase_ )
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self.text_encoder(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ :Optional[int] = negative_prompt_embeds.shape[1]
UpperCamelCase__ :List[Any] = negative_prompt_embeds.repeat(1 , UpperCamelCase_ )
UpperCamelCase__ :Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase_ )
UpperCamelCase__ :List[str] = uncond_text_encoder_hidden_states.shape[1]
UpperCamelCase__ :Tuple = uncond_text_encoder_hidden_states.repeat(1 , UpperCamelCase_ , 1 )
UpperCamelCase__ :List[str] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , UpperCamelCase_ , -1 )
UpperCamelCase__ :List[Any] = uncond_text_mask.repeat_interleave(UpperCamelCase_ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ :Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
UpperCamelCase__ :Optional[int] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
UpperCamelCase__ :int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
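        # with classifier-free guidance the effective batch is doubled: the
        # unconditional embeddings come first, followed by the text embeddings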
def lowerCAmelCase__ ( self , UpperCamelCase_=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCamelCase__ :int = torch.device(F'''cuda:{gpu_id}''' )
UpperCamelCase__ :Tuple = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
UpperCamelCase__ :Tuple = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to('''cpu''' , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCamelCase__ :str = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
if self.safety_checker is not None:
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = cpu_offload_with_hook(self.safety_checker , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
UpperCamelCase__ :Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = 512 , UpperCamelCase_ = 512 , UpperCamelCase_ = 100 , UpperCamelCase_ = 4.0 , UpperCamelCase_ = 1 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "pil" , UpperCamelCase_ = True , ):
'''simple docstring'''
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :int = 1
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Optional[int] = len(UpperCamelCase_ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase_ )}''' )
UpperCamelCase__ :Optional[int] = self._execution_device
UpperCamelCase__ :Any = batch_size * num_images_per_prompt
UpperCamelCase__ :List[Any] = guidance_scale > 1.0
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = self._encode_prompt(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Optional[Any] = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
UpperCamelCase__ :Optional[int] = torch.cat(UpperCamelCase_ , dim=0 )
if do_classifier_free_guidance:
UpperCamelCase__ :int = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
UpperCamelCase__ :List[Any] = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
UpperCamelCase__ :List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
UpperCamelCase__ :int = self.scheduler.timesteps
UpperCamelCase__ :List[Any] = self.unet.config.in_channels
UpperCamelCase__ , UpperCamelCase__ :Dict = get_new_h_w(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
UpperCamelCase__ :List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
UpperCamelCase__ :List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCamelCase__ :Any = {'''text_embeds''': prompt_embeds, '''image_embeds''': image_embeds}
UpperCamelCase__ :Optional[int] = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
UpperCamelCase__ , UpperCamelCase__ :List[Any] = noise_pred.chunk(2 )
UpperCamelCase__ , UpperCamelCase__ :Optional[int] = variance_pred.chunk(2 )
UpperCamelCase__ :int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCamelCase__ :Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , '''variance_type''' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCamelCase__ , UpperCamelCase__ :int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ :Optional[int] = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , ).prev_sample
# post-processing
UpperCamelCase__ :List[Any] = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )['''sample''']
if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}''' )
if output_type in ["np", "pil"]:
UpperCamelCase__ :List[str] = image * 0.5 + 0.5
UpperCamelCase__ :Optional[Any] = image.clamp(0 , 1 )
UpperCamelCase__ :List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCamelCase__ :str = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=UpperCamelCase_ )
| 97 |
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase : int = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "encodec"
    def __init__( self : Union[str, Any] , target_bandwidths : Tuple=[1.5, 3.0, 6.0, 12.0, 24.0] , sampling_rate : Tuple=2_4_0_0_0 , audio_channels : List[str]=1 , normalize : Tuple=False , chunk_length_s : List[Any]=None , overlap : int=None , hidden_size : Dict=1_2_8 , num_filters : Dict=3_2 , num_residual_layers : Tuple=1 , upsampling_ratios : Dict=[8, 5, 4, 2] , norm_type : Optional[Any]="weight_norm" , kernel_size : Any=7 , last_kernel_size : int=7 , residual_kernel_size : str=3 , dilation_growth_rate : Optional[int]=2 , use_causal_conv : Union[str, Any]=True , pad_mode : int="reflect" , compress : Optional[Any]=2 , num_lstm_layers : Optional[int]=2 , trim_right_ratio : List[Any]=1.0 , codebook_size : Dict=1_0_2_4 , codebook_dim : int=None , use_conv_shortcut : Optional[int]=True , **lowerCAmelCase_ : List[str] , ):
        """simple docstring"""
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
                F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''')
super().__init__(**lowerCAmelCase_)
@property
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
        hop_length = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
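        # hop_length is the product of the upsampling ratios (8 * 5 * 4 * 2 = 320 by
        # default), so the 24 kHz model emits ceil(24000 / 320) = 75 frames per second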
@property
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
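        # each residual quantizer codes log2(1024) = 10 bits per frame, so the top
        # target bandwidth in kbps fixes the codebook count, e.g. 24 kbps at
        # 75 frames/s -> 1000 * 24 // 750 = 32 quantizers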
| 136 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_rag''': ['''RagConfig'''],
'''retrieval_rag''': ['''RagRetriever'''],
'''tokenization_rag''': ['''RagTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_rag'''] = [
'''RagModel''',
'''RagPreTrainedModel''',
'''RagSequenceForGeneration''',
'''RagTokenForGeneration''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_rag'''] = [
'''TFRagModel''',
'''TFRagPreTrainedModel''',
'''TFRagSequenceForGeneration''',
'''TFRagTokenForGeneration''',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 365 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
lowerCamelCase = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {f'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class _a ( _lowercase):
_a : Tuple = VOCAB_FILES_NAMES
_a : Dict = PRETRAINED_VOCAB_FILES_MAP
_a : Dict = PRETRAINED_INIT_CONFIGURATION
_a : Union[str, Any] = FunnelTokenizer
_a : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : int = 2
def __init__( self : List[Any] , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : str=None , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : Any="<unk>" , _SCREAMING_SNAKE_CASE : Dict="<sep>" , _SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , _SCREAMING_SNAKE_CASE : str="<cls>" , _SCREAMING_SNAKE_CASE : List[str]="<mask>" , _SCREAMING_SNAKE_CASE : Optional[int]="<s>" , _SCREAMING_SNAKE_CASE : Dict="</s>" , _SCREAMING_SNAKE_CASE : Any=True , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : str="##" , **_SCREAMING_SNAKE_CASE : List[str] , )-> List[str]:
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , clean_text=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , wordpieces_prefix=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
lowerCAmelCase__ : int = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop('''type''' ) )
lowerCAmelCase__ : Dict = do_lower_case
lowerCAmelCase__ : str = strip_accents
lowerCAmelCase__ : Dict = tokenize_chinese_chars
lowerCAmelCase__ : str = normalizer_class(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = do_lower_case
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any]=None )-> Optional[int]:
lowerCAmelCase__ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
lowerCAmelCase__ : str = [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
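        # note: Funnel assigns token type 2 (cls_token_type_id above) to [CLS]
        # rather than the BERT-style 0, hence the len(cls ) * [self.cls_token_type_id] prefix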
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None )-> Tuple[str]:
lowerCAmelCase__ : Any = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 211 | 0 |
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict( flax_model , pytorch_checkpoint_path , is_sharded , allow_missing_keys=False ) -> Optional[int]:
    """simple docstring"""
    try:
        import torch # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions." )
        raise
    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path )
        logger.info(F"""Loading PyTorch weights from {pt_path}""" )
        pt_state_dict = torch.load(pt_path , map_location="cpu" )
        logger.info(F"""PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.""" )
        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict , flax_model )
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path , flax_model )
    return flax_state_dict
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict , model_prefix , ) -> (Tuple[str], np.ndarray):
    """simple docstring"""
    def is_key_or_prefix_key_in_dict(key ) -> bool:
        return len(set(random_flax_state_dict ) & {key, (model_prefix,) + key} ) > 0
# layer norm
lowerCamelCase__: Dict =pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
lowerCamelCase__: Dict =pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
lowerCamelCase__: Optional[Any] =pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# embedding
lowerCamelCase__: str =pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
lowerCamelCase__: Union[str, Any] =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
lowerCamelCase__: Union[str, Any] =pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
lowerCamelCase__: int =pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_SCREAMING_SNAKE_CASE ):
lowerCamelCase__: Any =pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
lowerCamelCase__: str =pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
lowerCamelCase__: Optional[int] =pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
lowerCamelCase__: List[str] =None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
lowerCamelCase__: Union[str, Any] =pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
lowerCamelCase__: Any =pt_tuple_key[-2] + '_v'
if name is not None:
lowerCamelCase__: List[str] =pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
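# note on the renames above: Flax layer norms use `scale`, embeddings use
# `embedding`, and linear/conv kernels are transposed because Flax stores them
# as (in, out) / (H, W, in, out) rather than PyTorch's (out, in) / (out, in, H, W)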
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: str ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase__: Tuple =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
lowerCamelCase__: Union[str, Any] =flax_model.params['params']
else:
lowerCamelCase__: int =flax_model.params
lowerCamelCase__: str =flatten_dict(_SCREAMING_SNAKE_CASE )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase__: List[str] =flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_SCREAMING_SNAKE_CASE )
lowerCamelCase__: Tuple ={}
lowerCamelCase__: int =(model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase__: Dict =(model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__: str =tuple(pt_key.split("." ) )
# remove base model prefix if necessary
lowerCamelCase__: Union[str, Any] =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase__: Any =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase__: str =rename_key_and_reshape_tensor(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# add model prefix if necessary
lowerCamelCase__: Dict =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase__: str =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
lowerCamelCase__: Union[str, Any] =jnp.asarray(_SCREAMING_SNAKE_CASE )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase__: Optional[int] =jnp.asarray(_SCREAMING_SNAKE_CASE )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase__: Any =jnp.asarray(_SCREAMING_SNAKE_CASE )
return unflatten_dict(_SCREAMING_SNAKE_CASE )
def convert_pytorch_sharded_state_dict_to_flax( shard_filenames , flax_model ) -> List[str]:
"""simple docstring"""
import torch
# Load the index
lowerCamelCase__: Optional[Any] ={}
for shard_file in shard_filenames:
# load using msgpack utils
lowerCamelCase__: List[str] =torch.load(_SCREAMING_SNAKE_CASE )
lowerCamelCase__: str ={k: v.numpy() for k, v in pt_state_dict.items()}
lowerCamelCase__: int =flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
lowerCamelCase__: List[str] =flax_model.params['params']
lowerCamelCase__: str =flatten_dict(_SCREAMING_SNAKE_CASE )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
lowerCamelCase__: Any =flax_model.params
lowerCamelCase__: Optional[int] =flatten_dict(_SCREAMING_SNAKE_CASE )
lowerCamelCase__: List[str] =(model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
lowerCamelCase__: Dict =(model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
lowerCamelCase__: Tuple =tuple(pt_key.split("." ) )
# remove base model prefix if necessary
lowerCamelCase__: List[str] =pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase__: Optional[int] =pt_tuple_key[1:]
# Correctly rename weight parameters
lowerCamelCase__: Dict =rename_key_and_reshape_tensor(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# add model prefix if necessary
lowerCamelCase__: Dict =(model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase__: List[str] =(model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """
F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
lowerCamelCase__: Any =jnp.asarray(_SCREAMING_SNAKE_CASE )
continue
if "var" in flax_key[-1]:
lowerCamelCase__: Tuple =jnp.asarray(_SCREAMING_SNAKE_CASE )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
continue
# also add unexpected weight so that warning is thrown
lowerCamelCase__: int =jnp.asarray(_SCREAMING_SNAKE_CASE )
else:
# also add unexpected weight so that warning is thrown
lowerCamelCase__: Optional[Any] =jnp.asarray(_SCREAMING_SNAKE_CASE )
return unflatten_dict(_SCREAMING_SNAKE_CASE )
def load_flax_checkpoint_in_pytorch_model( model , flax_checkpoint_path ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =os.path.abspath(_SCREAMING_SNAKE_CASE )
logger.info(F"""Loading Flax weights from {flax_checkpoint_path}""" )
# import correct flax class
lowerCamelCase__: List[Any] =getattr(_SCREAMING_SNAKE_CASE , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_SCREAMING_SNAKE_CASE , "rb" ) as state_f:
try:
lowerCamelCase__: str =from_bytes(_SCREAMING_SNAKE_CASE , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F"""Unable to convert {flax_checkpoint_path} to Flax deserializable object. """ )
return load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def load_flax_weights_in_pytorch_model( pt_model , flax_state ) -> List[str]:
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , flax_state ) ).values()
    if any(is_type_bf16 ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
        flax_state = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , flax_state )
    flax_state_dict = flatten_dict(flax_state )
    pt_model_dict = pt_model.state_dict()
lowerCamelCase__: Union[str, Any] =(pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
lowerCamelCase__: List[str] =(pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
lowerCamelCase__: Any =[]
lowerCamelCase__: Union[str, Any] =set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
lowerCamelCase__: Tuple =flax_key_tuple[0] == pt_model.base_model_prefix
lowerCamelCase__: Tuple ='.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
lowerCamelCase__: Tuple =flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
lowerCamelCase__: Union[str, Any] =(pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_SCREAMING_SNAKE_CASE ) not in pt_model_dict:
# conv layer
lowerCamelCase__: List[Any] =flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__: int =jnp.transpose(_SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_SCREAMING_SNAKE_CASE ) not in pt_model_dict:
# linear layer
lowerCamelCase__: List[str] =flax_key_tuple[:-1] + ('weight',)
lowerCamelCase__: int =flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase__: int =flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
lowerCamelCase__: Any =flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
lowerCamelCase__: str =flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
lowerCamelCase__: Optional[int] ='.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
lowerCamelCase__: Tuple ='.'.join(_SCREAMING_SNAKE_CASE )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
lowerCamelCase__: Tuple ={}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
lowerCamelCase__: List[str] =key.split("." )
lowerCamelCase__: Tuple =None
if key_components[-3::2] == ["parametrizations", "original0"]:
lowerCamelCase__: str =key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
lowerCamelCase__: List[str] =key_components[-2] + '_v'
if name is not None:
lowerCamelCase__: Optional[int] =key_components[:-3] + [name]
lowerCamelCase__: Optional[Any] ='.'.join(_SCREAMING_SNAKE_CASE )
lowerCamelCase__: Optional[Any] =key
if flax_key in special_pt_names:
lowerCamelCase__: Optional[int] =special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
lowerCamelCase__: List[Any] =np.asarray(_SCREAMING_SNAKE_CASE ) if not isinstance(_SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
lowerCamelCase__: str =torch.from_numpy(_SCREAMING_SNAKE_CASE )
# remove from missing keys
missing_keys.remove(_SCREAMING_SNAKE_CASE )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_SCREAMING_SNAKE_CASE )
pt_model.load_state_dict(_SCREAMING_SNAKE_CASE )
# re-transform missing_keys to list
lowerCamelCase__: Optional[int] =list(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F"""All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n""" )
if len(_SCREAMING_SNAKE_CASE ) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
" use it for predictions and inference." )
else:
logger.warning(
F"""All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"""
"If your task is similar to the task the model of the checkpoint was trained on, "
F"""you can already use {pt_model.__class__.__name__} for predictions without further training.""" )
return pt_model
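# The key-renaming above is the heart of the conversion: Flax stores conv
# kernels as (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W),
# hence the transpose over axes (3, 2, 0, 1). A minimal, self-contained sketch
# of that single step (the function name and shapes are illustrative, not part
# of this module's API):
import numpy as np

def convert_conv_kernel(flax_kernel: np.ndarray) -> np.ndarray:
    """Transpose a Flax (H, W, C_in, C_out) conv kernel to PyTorch (C_out, C_in, H, W)."""
    return np.transpose(flax_kernel, (3, 2, 0, 1))

# e.g. a 3x3 kernel with 4 input and 8 output channels:
# convert_conv_kernel(np.zeros((3, 3, 4, 8))).shape == (8, 4, 3, 3)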
| 10 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
# worker run by each process: holds one list element and trades it with neighbors
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """simple docstring"""
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """simple docstring"""
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
def main():
    """simple docstring"""
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)
if __name__ == "__main__":
main()
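# For contrast, the same algorithm without processes: odd-even transposition
# (brick) sort is bubble sort split into alternating even/odd neighbor passes,
# and n passes over n elements guarantee a sorted list. A minimal sequential
# sketch (not part of the parallel implementation above):
def odd_even_transposition_sequential(arr: list) -> list:
    for pass_idx in range(len(arr)):
        start = pass_idx % 2  # even passes compare (0,1),(2,3)...; odd passes (1,2),(3,4)...
        for j in range(start, len(arr) - 1, 2):
            if arr[j] > arr[j + 1]:
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr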
| 293 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase : Any =logging.get_logger(__name__)
lowerCAmelCase : List[str] ={
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class a_ ( _lowerCAmelCase ):
__A = "deformable_detr"
__A = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self : Any , lowercase : List[Any]=True , lowercase : Dict=None , lowercase : Tuple=3 , lowercase : Optional[Any]=300 , lowercase : int=1_024 , lowercase : Tuple=6 , lowercase : str=1_024 , lowercase : Optional[int]=8 , lowercase : Optional[int]=6 , lowercase : List[Any]=1_024 , lowercase : List[str]=8 , lowercase : List[str]=0.0 , lowercase : Dict=True , lowercase : List[Any]="relu" , lowercase : Tuple=256 , lowercase : Union[str, Any]=0.1 , lowercase : List[str]=0.0 , lowercase : Dict=0.0 , lowercase : int=0.02 , lowercase : int=1.0 , lowercase : Union[str, Any]=True , lowercase : List[Any]=False , lowercase : Any="sine" , lowercase : Dict="resnet50" , lowercase : Union[str, Any]=True , lowercase : Dict=False , lowercase : List[Any]=4 , lowercase : Dict=4 , lowercase : Optional[int]=4 , lowercase : Optional[int]=False , lowercase : Dict=300 , lowercase : List[str]=False , lowercase : Optional[int]=1 , lowercase : Any=5 , lowercase : List[str]=2 , lowercase : Any=1 , lowercase : int=1 , lowercase : Optional[int]=5 , lowercase : str=2 , lowercase : Any=0.1 , lowercase : Any=0.25 , lowercase : List[str]=False , **lowercase : Union[str, Any] , ):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowercase_ :Any = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowercase , lowercase ):
lowercase_ :List[Any] = backbone_config.get("model_type" )
lowercase_ :Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
lowercase_ :Any = config_class.from_dict(lowercase )
lowercase_ :Dict = use_timm_backbone
lowercase_ :Tuple = backbone_config
lowercase_ :str = num_channels
lowercase_ :Any = num_queries
lowercase_ :Optional[Any] = max_position_embeddings
lowercase_ :Dict = d_model
lowercase_ :Optional[int] = encoder_ffn_dim
lowercase_ :int = encoder_layers
lowercase_ :Any = encoder_attention_heads
lowercase_ :Optional[Any] = decoder_ffn_dim
lowercase_ :List[str] = decoder_layers
lowercase_ :List[Any] = decoder_attention_heads
lowercase_ :str = dropout
lowercase_ :int = attention_dropout
lowercase_ :int = activation_dropout
lowercase_ :List[Any] = activation_function
lowercase_ :int = init_std
lowercase_ :Any = init_xavier_std
lowercase_ :List[Any] = encoder_layerdrop
lowercase_ :Optional[int] = auxiliary_loss
lowercase_ :str = position_embedding_type
lowercase_ :List[Any] = backbone
lowercase_ :Optional[Any] = use_pretrained_backbone
lowercase_ :Union[str, Any] = dilation
# deformable attributes
lowercase_ :str = num_feature_levels
lowercase_ :Any = encoder_n_points
lowercase_ :Any = decoder_n_points
lowercase_ :Union[str, Any] = two_stage
lowercase_ :Tuple = two_stage_num_proposals
lowercase_ :Optional[Any] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
lowercase_ :Any = class_cost
lowercase_ :List[str] = bbox_cost
lowercase_ :Tuple = giou_cost
# Loss coefficients
lowercase_ :Any = mask_loss_coefficient
lowercase_ :Tuple = dice_loss_coefficient
lowercase_ :Optional[int] = bbox_loss_coefficient
lowercase_ :Dict = giou_loss_coefficient
lowercase_ :Dict = eos_coefficient
lowercase_ :List[Any] = focal_alpha
lowercase_ :Any = disable_custom_kernels
super().__init__(is_encoder_decoder=lowercase , **lowercase )
@property
def lowercase__ ( self : str ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def lowercase__ ( self : Optional[Any] ):
"""simple docstring"""
return self.d_model
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
lowercase_ :Optional[Any] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
lowercase_ :Any = self.backbone_config.to_dict()
lowercase_ :Any = self.__class__.model_type
return output
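# Usage sketch (hedged): in transformers this class ships as
# DeformableDetrConfig. Its attribute_map lets the generic names resolve to
# the DETR-specific attributes, and the two properties mirror that mapping:
#   config = DeformableDetrConfig(d_model=256, encoder_attention_heads=8)
#   config.hidden_size          # -> 256 (mapped to d_model)
#   config.num_attention_heads  # -> 8 (property over encoder_attention_heads)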
| 147 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase : Any =logging.get_logger(__name__)
def make_batched( videos ):
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(F'Could not make batched video from {videos}' )
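# make_batched normalizes accepted video inputs to a list of videos, each a
# list of frames: one bare frame becomes [[frame]], one video (a list of
# frames) becomes [video], and a batch of videos passes through unchanged.
# Hedged illustration with dummy arrays:
#   frame = np.zeros((224, 224, 3), dtype=np.uint8)
#   len(make_batched(frame))               # 1 -> [[frame]]
#   len(make_batched([frame, frame]))      # 1 -> one video of two frames
#   len(make_batched([[frame], [frame]]))  # 2 -> batch of two one-frame videos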
class a_ ( _lowerCAmelCase ):
__A = ["pixel_values"]
def __init__( self : List[str] , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : bool = True , lowercase : Dict[str, int] = None , lowercase : bool = True , lowercase : Union[int, float] = 1 / 255 , lowercase : bool = True , lowercase : bool = True , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , **lowercase : Tuple , ):
"""simple docstring"""
super().__init__(**lowercase )
lowercase_ :Any = size if size is not None else {"shortest_edge": 256}
lowercase_ :int = get_size_dict(lowercase , default_to_square=lowercase )
lowercase_ :str = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowercase_ :List[str] = get_size_dict(lowercase , param_name="crop_size" )
lowercase_ :List[str] = do_resize
lowercase_ :Any = size
lowercase_ :Union[str, Any] = do_center_crop
lowercase_ :Union[str, Any] = crop_size
lowercase_ :Optional[Any] = resample
lowercase_ :List[str] = do_rescale
lowercase_ :List[Any] = rescale_factor
lowercase_ :Dict = offset
lowercase_ :Optional[Any] = do_normalize
lowercase_ :Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ :Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Union[str, Any] , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : PILImageResampling = PILImageResampling.BILINEAR , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Optional[Any] , ):
"""simple docstring"""
lowercase_ :List[Any] = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" in size:
lowercase_ :int = get_resize_output_image_size(lowercase , size["shortest_edge"] , default_to_square=lowercase )
elif "height" in size and "width" in size:
lowercase_ :Union[str, Any] = (size["height"], size["width"])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def lowercase__ ( self : str , lowercase : np.ndarray , lowercase : Dict[str, int] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : str , ):
"""simple docstring"""
lowercase_ :Any = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase , size=(size["height"], size["width"]) , data_format=lowercase , **lowercase )
def lowercase__ ( self : List[str] , lowercase : np.ndarray , lowercase : Union[int, float] , lowercase : bool = True , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : List[str] , ):
"""simple docstring"""
lowercase_ :List[str] = image.astype(np.floataa )
if offset:
lowercase_ :List[str] = image - (scale / 2)
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def lowercase__ ( self : Tuple , lowercase : np.ndarray , lowercase : Union[float, List[float]] , lowercase : Union[float, List[float]] , lowercase : Optional[Union[str, ChannelDimension]] = None , **lowercase : Dict , ):
"""simple docstring"""
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def lowercase__ ( self : Tuple , lowercase : ImageInput , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : bool = None , lowercase : float = None , lowercase : bool = None , lowercase : bool = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
"""simple docstring"""
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
lowercase_ :Optional[int] = to_numpy_array(lowercase )
if do_resize:
lowercase_ :Tuple = self.resize(image=lowercase , size=lowercase , resample=lowercase )
if do_center_crop:
lowercase_ :Any = self.center_crop(lowercase , size=lowercase )
if do_rescale:
lowercase_ :Optional[Any] = self.rescale(image=lowercase , scale=lowercase , offset=lowercase )
if do_normalize:
lowercase_ :Tuple = self.normalize(image=lowercase , mean=lowercase , std=lowercase )
lowercase_ :Optional[Any] = to_channel_dimension_format(lowercase , lowercase )
return image
def lowercase__ ( self : Dict , lowercase : ImageInput , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : PILImageResampling = None , lowercase : bool = None , lowercase : Dict[str, int] = None , lowercase : bool = None , lowercase : float = None , lowercase : bool = None , lowercase : bool = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[float, List[float]]] = None , lowercase : Optional[Union[str, TensorType]] = None , lowercase : ChannelDimension = ChannelDimension.FIRST , **lowercase : Optional[int] , ):
"""simple docstring"""
lowercase_ :str = do_resize if do_resize is not None else self.do_resize
lowercase_ :Optional[Any] = resample if resample is not None else self.resample
lowercase_ :Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ :Dict = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ :Optional[int] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ :Dict = offset if offset is not None else self.offset
lowercase_ :Tuple = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ :int = image_mean if image_mean is not None else self.image_mean
lowercase_ :Optional[int] = image_std if image_std is not None else self.image_std
lowercase_ :int = size if size is not None else self.size
lowercase_ :Optional[int] = get_size_dict(lowercase , default_to_square=lowercase )
lowercase_ :List[Any] = crop_size if crop_size is not None else self.crop_size
lowercase_ :List[str] = get_size_dict(lowercase , param_name="crop_size" )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
lowercase_ :List[str] = make_batched(lowercase )
lowercase_ :List[Any] = [
[
self._preprocess_image(
image=lowercase , do_resize=lowercase , size=lowercase , resample=lowercase , do_center_crop=lowercase , crop_size=lowercase , do_rescale=lowercase , rescale_factor=lowercase , offset=lowercase , do_normalize=lowercase , image_mean=lowercase , image_std=lowercase , data_format=lowercase , )
for img in video
]
for video in videos
]
lowercase_ :Optional[int] = {"pixel_values": videos}
return BatchFeature(data=lowercase , tensor_type=lowercase )
| 147 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ ='yolos'
def __init__(self , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_="gelu" , a_=0.0 , a_=0.0 , a_=0.02 , a_=1E-12 , a_=[5_12, 8_64] , a_=16 , a_=3 , a_=True , a_=1_00 , a_=True , a_=False , a_=1 , a_=5 , a_=2 , a_=5 , a_=2 , a_=0.1 , **a_ , ):
'''simple docstring'''
super().__init__(**a_ )
__snake_case : Union[str, Any] = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Dict = num_attention_heads
__snake_case : str = intermediate_size
__snake_case : List[str] = hidden_act
__snake_case : Tuple = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = initializer_range
__snake_case : Tuple = layer_norm_eps
__snake_case : List[Any] = image_size
__snake_case : Tuple = patch_size
__snake_case : str = num_channels
__snake_case : Tuple = qkv_bias
__snake_case : Union[str, Any] = num_detection_tokens
__snake_case : List[str] = use_mid_position_embeddings
__snake_case : Tuple = auxiliary_loss
# Hungarian matcher
__snake_case : List[str] = class_cost
__snake_case : int = bbox_cost
__snake_case : int = giou_cost
# Loss coefficients
__snake_case : Optional[int] = bbox_loss_coefficient
__snake_case : List[str] = giou_loss_coefficient
__snake_case : List[Any] = eos_coefficient
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
lowerCamelCase__ =version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 1E-4
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 12
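# Reading the ONNX config above: the parsed "1.11" class attribute is, in
# transformers' OnnxConfig API, the minimum torch version required for export
# (torch_onnx_minimum_version), and the three properties give, in order, the
# input spec (pixel_values with dynamic batch/num_channels/height/width axes),
# the validation tolerance (1e-4), and the default ONNX opset (12).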
| 102 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
SCREAMING_SNAKE_CASE : List[Any] = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""]
def pytest_collection_modifyitems(config, items):
    """simple docstring"""
    for item in items:
        if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
            continue
        item.add_marker(pytest.mark.unit )
def pytest_configure(config):
    """simple docstring"""
    config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """simple docstring"""
    test_hf_cache_home = tmp_path_factory.getbasetemp() / '''cache'''
    test_hf_datasets_cache = test_hf_cache_home / '''datasets'''
    test_hf_metrics_cache = test_hf_cache_home / '''metrics'''
    test_hf_modules_cache = test_hf_cache_home / '''modules'''
    monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(test_hf_datasets_cache ) )
    monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(test_hf_metrics_cache ) )
    monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / '''downloads'''
    monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / '''downloads''' / '''extracted'''
    monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(test_extracted_datasets_path ) )
@pytest.fixture(autouse=True , scope='''session''' )
def disable_tqdm_output():
    """simple docstring"""
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , False )
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """simple docstring"""
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , True )
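# Together these fixtures sandbox the test session: every datasets cache path
# is redirected into pytest's temporary directory, progress bars are disabled,
# and download-count updates are turned off. The same isolation pattern for a
# single variable (a minimal sketch, not from this file):
#   @pytest.fixture(autouse=True)
#   def isolated_cache(monkeypatch, tmp_path):
#       monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(tmp_path / "datasets"))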
| 102 | 1 |
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError('''List is empty''' )
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase__ ( PipelineTesterMixin , unittest.TestCase ):
a__ : str = ShapEImgaImgPipeline
a__ : Union[str, Any] = ["""image"""]
a__ : Optional[int] = ["""image"""]
a__ : Union[str, Any] = [
"""num_images_per_prompt""",
"""num_inference_steps""",
"""generator""",
"""latents""",
"""guidance_scale""",
"""frame_size""",
"""output_type""",
"""return_dict""",
]
a__ : List[str] = False
@property
def __A ( self : Dict ) -> Optional[Any]:
return 32
@property
def __A ( self : Optional[int] ) -> Optional[int]:
return 32
@property
def __A ( self : Optional[int] ) -> List[Any]:
return self.time_input_dim * 4
@property
def __A ( self : str ) -> List[Any]:
return 8
@property
def __A ( self : Optional[Any] ) -> Union[str, Any]:
torch.manual_seed(0 )
__lowerCamelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCamelCase = CLIPVisionModel(SCREAMING_SNAKE_CASE__ )
return model
@property
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
__lowerCamelCase = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_resize=SCREAMING_SNAKE_CASE__ , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=2_24 , )
return image_processor
@property
def __A ( self : Dict ) -> int:
torch.manual_seed(0 )
__lowerCamelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''embedding_proj_norm_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__lowerCamelCase = PriorTransformer(**SCREAMING_SNAKE_CASE__ )
return model
@property
def __A ( self : Tuple ) -> Dict:
torch.manual_seed(0 )
__lowerCamelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase = ShapERenderer(**SCREAMING_SNAKE_CASE__ )
return model
def __A ( self : Optional[int] ) -> List[str]:
__lowerCamelCase = self.dummy_prior
__lowerCamelCase = self.dummy_image_encoder
__lowerCamelCase = self.dummy_image_processor
__lowerCamelCase = self.dummy_renderer
__lowerCamelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=SCREAMING_SNAKE_CASE__ , clip_sample=SCREAMING_SNAKE_CASE__ , clip_sample_range=1.0 , )
__lowerCamelCase = {
'''prior''': prior,
'''image_encoder''': image_encoder,
'''image_processor''': image_processor,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __A ( self : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=0 ) -> int:
__lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
__lowerCamelCase = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = {
'''image''': input_image,
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __A ( self : Union[str, Any] ) -> Dict:
__lowerCamelCase = '''cpu'''
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self : str ) -> Tuple:
        # NOTE: larger batch sizes cause this test to time out, so we only test smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __A ( self : Optional[Any] ) -> str:
__lowerCamelCase = torch_device == '''cpu'''
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=SCREAMING_SNAKE_CASE__ , relax_max_difference=SCREAMING_SNAKE_CASE__ , )
def __A ( self : Dict ) -> Optional[int]:
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**SCREAMING_SNAKE_CASE__ , num_images_per_prompt=SCREAMING_SNAKE_CASE__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : str ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : str ) -> Union[str, Any]:
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/shap_e/corgi.png''' )
__lowerCamelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_img2img_out.npy''' )
__lowerCamelCase = ShapEImgaImgPipeline.from_pretrained('''openai/shap-e-img2img''' )
__lowerCamelCase = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 )
__lowerCamelCase = pipe(
SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
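# The dummy-input helper above seeds a torch.Generator per device, falling back
# to the global generator on "mps", where device generators were unsupported in
# some torch versions. The same pattern in isolation (a sketch; the function
# name is illustrative):
import torch

def seeded_generator(device, seed: int = 0) -> torch.Generator:
    # torch.manual_seed returns the (seeded) default Generator
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)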
| 339 | 0 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self , lowercase , lowercase , lowercase=1024 , lowercase=1024 , lowercase=3.6 ) -> Tuple:
'''simple docstring'''
A__ = tokenizer
A__ = tokenizer.bos_token_id
A__ = dataset
A__ = seq_length
A__ = seq_length * chars_per_token * num_of_sequences
def __iter__( self ) -> Tuple:
'''simple docstring'''
A__ = iter(self.dataset )
A__ = True
while more_examples:
A__ , A__ = [], 0
while True:
if buffer_len >= self.input_characters:
break
try:
buffer.append(next(lowercase )["content"] )
buffer_len += len(buffer[-1] )
except StopIteration:
A__ = False
break
A__ = tokenizer(lowercase , truncation=lowercase )["input_ids"]
A__ = []
for tokenized_input in tokenized_inputs:
all_token_ids.extend(tokenized_input + [self.concat_token_id] )
for i in range(0 , len(lowercase ) , self.seq_length ):
A__ = all_token_ids[i : i + self.seq_length]
if len(lowercase ) == self.seq_length:
yield torch.tensor(lowercase )
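# ConstantLengthDataset packs tokenized examples back to back, separated by the
# BOS token, and yields fixed seq_length chunks, dropping a final partial chunk.
# chars_per_token=3.6 is a rough characters-per-token heuristic for code
# corpora: the iterator buffers seq_length * 3.6 * num_of_sequences raw
# characters before each tokenizer call.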
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] ) -> List[str]:
'''simple docstring'''
A__ = {"streaming": True}
A__ = load_dataset(args.dataset_name , split="train" , **SCREAMING_SNAKE_CASE_ )
A__ = ConstantLengthDataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , seq_length=args.seq_length )
A__ = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=args.batch_size )
return eval_dataloader
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] ) -> int:
'''simple docstring'''
model.eval()
A__ = []
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
with torch.no_grad():
A__ = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
A__ = outputs.loss.repeat(args.batch_size )
losses.append(accelerator.gather(SCREAMING_SNAKE_CASE_ ) )
if args.max_eval_steps > 0 and step >= args.max_eval_steps:
break
A__ = torch.mean(torch.cat(SCREAMING_SNAKE_CASE_ ) )
try:
A__ = torch.exp(SCREAMING_SNAKE_CASE_ )
except OverflowError:
A__ = float("inf" )
return loss.item(), perplexity.item()
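# Perplexity here is exp(mean cross-entropy loss) over the gathered eval
# batches, with the OverflowError guard mapping a diverged loss to inf instead
# of crashing. Equivalently (illustrative): perplexity = math.exp(mean_loss).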
# Setup Accelerator
lowerCAmelCase__ = Accelerator()
# Parse configuration
lowerCAmelCase__ = HfArgumentParser(EvaluationArguments)
lowerCAmelCase__ = parser.parse_args()
set_seed(args.seed)
# Logging
lowerCAmelCase__ = logging.getLogger(__name__)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
lowerCAmelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
lowerCAmelCase__ = create_dataloader(args)
# Prepare everything with our `accelerator`.
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
lowerCAmelCase__ , lowerCAmelCase__ = evaluate(args)
logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 68 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
@property
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Any =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : Tuple =self.dummy_uncond_unet
UpperCAmelCase : Optional[int] =KarrasVeScheduler()
UpperCAmelCase : List[Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : List[str] =torch.manual_seed(0 )
UpperCAmelCase : List[str] =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' ).images
UpperCAmelCase : str =torch.manual_seed(0 )
UpperCAmelCase : str =pipe(num_inference_steps=2 , generator=snake_case__ , output_type='''numpy''' , return_dict=snake_case__ )[0]
UpperCAmelCase : Any =image[0, -3:, -3:, -1]
UpperCAmelCase : List[str] =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : int =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple ='''google/ncsnpp-celebahq-256'''
UpperCAmelCase : int =UNetaDModel.from_pretrained(snake_case__ )
UpperCAmelCase : Dict =KarrasVeScheduler()
UpperCAmelCase : Union[str, Any] =KarrasVePipeline(unet=snake_case__ , scheduler=snake_case__ )
pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
UpperCAmelCase : Any =torch.manual_seed(0 )
UpperCAmelCase : Tuple =pipe(num_inference_steps=20 , generator=snake_case__ , output_type='''numpy''' ).images
UpperCAmelCase : Optional[int] =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Tuple =np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 348 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A_ : str = logging.get_logger(__name__)
A_ : Optional[int] = "▁"
A_ : Dict = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
A_ : List[str] = {
"vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
},
"monolingual_vocab_file": {
"vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
},
}
A_ : Tuple = {"vinai/bartpho-syllable": 10_24}
class a_ ( snake_case_ ):
'''simple docstring'''
lowerCamelCase__ : Tuple = VOCAB_FILES_NAMES
lowerCamelCase__ : Dict = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : Any = ['input_ids', 'attention_mask']
def __init__(self, lowerCamelCase_, lowerCamelCase_, lowerCamelCase_="<s>", lowerCamelCase_="</s>", lowerCamelCase_="</s>", lowerCamelCase_="<s>", lowerCamelCase_="<unk>", lowerCamelCase_="<pad>", lowerCamelCase_="<mask>", lowerCamelCase_ = None, **lowerCamelCase_, ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = AddedToken(lowerCamelCase_, lstrip=lowerCamelCase_, rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_, lowerCamelCase_ ) else mask_token
lowerCamelCase__ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowerCamelCase_, eos_token=lowerCamelCase_, unk_token=lowerCamelCase_, sep_token=lowerCamelCase_, cls_token=lowerCamelCase_, pad_token=lowerCamelCase_, mask_token=lowerCamelCase_, sp_model_kwargs=self.sp_model_kwargs, **lowerCamelCase_, )
lowerCamelCase__ : int = vocab_file
lowerCamelCase__ : List[Any] = monolingual_vocab_file
lowerCamelCase__ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowerCamelCase_ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : List[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(lowerCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ : List[Any] = cnt
cnt += 1
with open(lowerCamelCase_, 'r', encoding='utf-8' ) as f:
for line in f.readlines():
lowerCamelCase__ : Tuple = line.strip().split()[0]
lowerCamelCase__ : Any = len(self.fairseq_tokens_to_ids )
if str(lowerCamelCase_ ) not in self.fairseq_tokens_to_ids:
lowerCamelCase__ : Optional[Any] = len(self.fairseq_tokens_to_ids )
lowerCamelCase__ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__(self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = self.__dict__.copy()
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
return state
def __setstate__(self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
lowerCamelCase__ : Optional[int] = {}
lowerCamelCase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase__ : Optional[int] = [self.cls_token_id]
lowerCamelCase__ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None, lowerCamelCase_ = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase_, token_ids_a=lowerCamelCase_, already_has_special_tokens=lowerCamelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase_ )) + [1]
return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = [self.sep_token_id]
lowerCamelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def a__ (self ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = {self.convert_ids_to_tokens(lowerCamelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.sp_model.encode(lowerCamelCase_, out_type=lowerCamelCase_ )
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def a__ (self, lowerCamelCase_ ):
'''simple docstring'''
lowerCamelCase__ : str = ''.join(lowerCamelCase_ ).replace(lowerCamelCase_, ' ' ).strip()
return out_string
def a__ (self, lowerCamelCase_, lowerCamelCase_ = None ):
'''simple docstring'''
if not os.path.isdir(lowerCamelCase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCamelCase__ : List[str] = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase__ : str = os.path.join(
lowerCamelCase_, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'], )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, lowerCamelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCamelCase_, 'wb' ) as fi:
lowerCamelCase__ : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowerCamelCase_ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
lowerCamelCase_ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file, lowerCamelCase_ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(lowerCamelCase_, 'w', encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(lowerCamelCase_ )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
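# For reference, the special-token layout implemented above follows the
# RoBERTa-style convention BARTpho inherits: a single sequence is encoded as
# <s> A </s>, a pair as <s> A </s></s> B </s>, and token_type_ids are all
# zeros in both cases (they carry no information for this model).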
| 316 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # This checks that we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
        # Testing an older version by monkey-patching the version in the module where it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
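# The last two tests exercise the versioned-configuration mechanism: a repo may
# ship config.<version>.json files alongside config.json, and from_pretrained
# selects the newest file whose version does not exceed the installed
# transformers version, falling back to the plain config.json otherwise.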
| 316 | 1 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
A: List[str] = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
A: Dict = '''
This metric wraps the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
A: List[Any] = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric ):
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute( self , predictions , references ):
        '''simple docstring'''
        pred_dict = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
        dataset = [
            {
                """paragraphs""": [
                    {
                        """qas""": [
                            {
                                """answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
                                """id""": ref["""id"""],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 109 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin ):
"""simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , images: ImageInput = None , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_token_type_ids: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
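# Hedged usage sketch (illustrative, not part of the class above; the model id
# and image path are assumptions): a call with both inputs returns the image
# processor's `pixel_values` merged with the tokenizer's text fields.
#
#   from PIL import Image
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   image = Image.open("cat.png")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   list(inputs.keys())  # e.g. ["pixel_values", "input_ids", "attention_mask"]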
| 53 | 0 |
'''simple docstring'''
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 100_0000,
"gigajoule": 10_0000_0000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 360_0000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 418_6800.00,
"electronvolt": 1.602176634E-19,
"britishthermalunit_it": 1055.0_5585,
"footpound": 1.355818,
}
def energy_conversion(from_type: str , to_type: str , value: float ) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
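# Illustrative conversions (the values follow directly from the table above):
#   energy_conversion("joule", "kilojoule", 1000)   # -> 1.0
#   energy_conversion("kilowatthour", "joule", 1)   # -> 3_600_000.0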
if __name__ == "__main__":
import doctest
doctest.testmod()
| 43 |
'''simple docstring'''
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """simple docstring"""
    def put( self , value ):
        raise NotImplementedError()
    def end( self ):
        raise NotImplementedError()
class TextStreamer( BaseStreamer ):
"""simple docstring"""
    def __init__( self , tokenizer: "AutoTokenizer" , skip_prompt: bool = False , **decode_kwargs ):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put( self , value ):
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError("""TextStreamer only supports batch size 1""" )
        elif len(value.shape ) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith("""\n""" ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(""" """ ) + 1]
            self.print_len += len(printable_text )
        self.on_finalized_text(printable_text )
    def end( self ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = """"""
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )
    def on_finalized_text( self , text: str , stream_end: bool = False ):
        print(text , flush=True , end="""""" if not stream_end else None )
    def _is_chinese_char( self , cp ):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
        # like all of the other languages.
if (
(cp >= 0x4E00 and cp <= 0x9FFF)
or (cp >= 0x3400 and cp <= 0x4DBF) #
or (cp >= 0x20000 and cp <= 0x2A6DF) #
or (cp >= 0x2A700 and cp <= 0x2B73F) #
or (cp >= 0x2B740 and cp <= 0x2B81F) #
or (cp >= 0x2B820 and cp <= 0x2CEAF) #
or (cp >= 0xF900 and cp <= 0xFAFF)
or (cp >= 0x2F800 and cp <= 0x2FA1F) #
): #
return True
return False
class TextIteratorStreamer( TextStreamer ):
    """simple docstring"""
    def __init__( self , tokenizer: "AutoTokenizer" , skip_prompt: bool = False , timeout: Optional[float] = None , **decode_kwargs ):
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout
    def on_finalized_text( self , text: str , stream_end: bool = False ):
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
def __iter__( self : List[str] ):
return self
    def __next__( self ):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
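# Hedged usage sketch for TextIteratorStreamer (the model id is an assumption):
# generation runs in a background thread while the main thread consumes the
# decoded text chunks by iterating over the streamer.
#
#   from threading import Thread
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
#   streamer = TextIteratorStreamer(tok)
#   thread = Thread(target=model.generate, kwargs=dict(**inputs, streamer=streamer, max_new_tokens=20))
#   thread.start()
#   generated_text = "".join(chunk for chunk in streamer)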
| 43 | 1 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs , ks ):
    '''Return True if the regexes in qs match any window of the key tuple ks.'''
    # compile regexes and force complete match
    qts = tuple((re.compile(x + '$' ) for x in qs) )
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(qts , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def _replacement_rules(rules ):
    '''simple docstring'''
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val
    return replace
def _get_partition_rules():
'''simple docstring'''
return [
# embeddings
(("transformer", "wpe", "embedding"), P('mp' , __snake_case )),
(("transformer", "wte", "embedding"), P('mp' , __snake_case )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__snake_case , 'mp' )),
(("attention", "out_proj", "kernel"), P('mp' , __snake_case )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(__snake_case , 'mp' )),
(("mlp", "c_fc", "bias"), P('mp' )),
(("mlp", "c_proj", "kernel"), P('mp' , __snake_case )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def set_partitions(in_dict ):
    '''simple docstring'''
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
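# Hedged usage sketch (the parameter tree below is a minimal stand-in for a real
# GPT-2 flax parameter dict): every leaf must be matched by one of the rules
# above, otherwise the assert in `set_partitions` fires.
#
#   params = {"transformer": {"wte": {"embedding": None}}}
#   specs = set_partitions(params)  # frozen dict with a PartitionSpec per leaf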
| 29 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NystromformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NystromformerForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NystromformerForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    def setUp( self ):
        self.model_tester = NystromformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NystromformerConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head( self ):
        model = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_masked_lm_end_to_end( self ):
        sentence = """the [MASK] of Belgium is Brussels"""
        tokenizer = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
        model = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
        encoding = tokenizer(sentence , return_tensors="""pt""" )
        with torch.no_grad():
            token_logits = model(encoding.input_ids ).logits
        prediction = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(prediction ) , """capital""" )
| 239 | 0 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict ):
_snake_case = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
'decoder.output_projection.weight',
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ):
    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
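# Illustrative CLI invocation (the script name and paths are placeholders):
#   python convert_mbart_checkpoint.py model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned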
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is an mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 130 |
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray , pt1: np.ndarray , pt2: np.ndarray , rows: int , cols: int ) -> np.ndarray:
    rotation_mat = cv2.getAffineTransform(pt1 , pt2 )
    return cv2.warpAffine(img , rotation_mat , (rows, cols) )
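# Worked note: three source->destination point correspondences fully determine
# the six entries of the 2x3 affine matrix that `cv2.getAffineTransform` solves
# for. Illustrative call (arrays as set up in the __main__ block below):
#   rotated = get_rotation(gray_img, pts1, pts2, img_rows, img_cols)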
if __name__ == "__main__":
# read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
        plt.title(titles[i])
        plt.axis('''off''')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 130 | 1 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
lowercase_ = Mapping[str, np.ndarray]
lowercase_ = Mapping[str, Any] # Is a nested dict.
lowercase_ = 0.01
@dataclasses.dataclass(frozen=True )
class Protein:
    """Protein structure representation."""
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None
    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None
    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str ) -> Protein:
    '''simple docstring'''
    tag_re = r'(\[[A-Z]+\]\n)'
    tags = [tag.strip() for tag in re.split(tag_re , proteinnet_str ) if len(tag ) > 0]
    groups = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq ) ):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = 'X'  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3 ):
                tertiary.append(list(map(float , g[1][axis].split() ) ) )
            tertiary_np = np.array(tertiary )
            atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
            atom_mask = np.zeros(
                (
                    len(mask ),
                    residue_constants.atom_type_num,
                ) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions , atom_mask=atom_mask , aatype=aatype , residue_index=np.arange(len(aatype ) ) , b_factors=None , )
def get_pdb_headers(prot: Protein , chain_id: int = 0 ) -> List[str]:
    '''simple docstring'''
    pdb_headers = []
    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f'REMARK {remark}' )
    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index , parents ) if i == chain_id]
    if parents is None or len(parents ) == 0:
        parents = ['N/A']
    pdb_headers.append(f'PARENT {" ".join(parents )}' )
    return pdb_headers
def add_pdb_headers(prot: Protein , pdb_str: str ) -> str:
'''simple docstring'''
A__ = []
A__ = pdb_str.split('\n' )
A__ = prot.remark
if remark is not None:
out_pdb_lines.append(f'REMARK {remark}' )
A__ = 42
if prot.parents is not None and len(prot.parents ) > 0:
A__ = []
if prot.parents_chain_index is not None:
A__ = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(SCREAMING_SNAKE_CASE__ ) , [] )
parent_dict[str(SCREAMING_SNAKE_CASE__ )].append(SCREAMING_SNAKE_CASE__ )
A__ = max([int(SCREAMING_SNAKE_CASE__ ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
A__ = parent_dict.get(str(SCREAMING_SNAKE_CASE__ ) , ['N/A'] )
parents_per_chain.append(SCREAMING_SNAKE_CASE__ )
else:
parents_per_chain.append(list(prot.parents ) )
else:
A__ = [['N/A']]
def make_parent_line(SCREAMING_SNAKE_CASE__ : Sequence[str] ) -> str:
return f'PARENT {" ".join(SCREAMING_SNAKE_CASE__ )}'
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
A__ = 0
for i, l in enumerate(SCREAMING_SNAKE_CASE__ ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(SCREAMING_SNAKE_CASE__ )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(SCREAMING_SNAKE_CASE__ ):
A__ = parents_per_chain[chain_counter]
else:
A__ = ['N/A']
out_pdb_lines.append(make_parent_line(SCREAMING_SNAKE_CASE__ ) )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def to_pdb(prot: Protein ) -> str:
'''simple docstring'''
A__ = residue_constants.restypes + ['X']
def res_atoa(SCREAMING_SNAKE_CASE__ : int ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
A__ = residue_constants.atom_types
A__ = []
A__ = prot.atom_mask
A__ = prot.aatype
A__ = prot.atom_positions
A__ = prot.residue_index.astype(np.intaa )
A__ = prot.b_factors
A__ = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
A__ = get_pdb_headers(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
pdb_lines.extend(SCREAMING_SNAKE_CASE__ )
A__ = aatype.shape[0]
A__ = 1
A__ = 0
A__ = string.ascii_uppercase
A__ = None
# Add all atom sites.
for i in range(SCREAMING_SNAKE_CASE__ ):
A__ = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(SCREAMING_SNAKE_CASE__ , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
A__ = 'ATOM'
A__ = atom_name if len(SCREAMING_SNAKE_CASE__ ) == 4 else f' {atom_name}'
A__ = ''
A__ = ''
A__ = 1.00
A__ = atom_name[0] # Protein supports only C, N, O, S, this works.
A__ = ''
A__ = 'A'
if chain_index is not None:
A__ = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
A__ = (
f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
f'{res_name_a:>3} {chain_tag:>1}'
f'{residue_index[i]:>4}{insertion_code:>1} '
f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
f'{occupancy:>6.2f}{b_factor:>6.2f} '
f'{element:>2}{charge:>2}'
)
pdb_lines.append(SCREAMING_SNAKE_CASE__ )
atom_index += 1
A__ = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
A__ = True
A__ = chain_index[i + 1]
if should_terminate:
# Close the chain.
A__ = 'TER'
A__ = (
f'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
)
pdb_lines.append(SCREAMING_SNAKE_CASE__ )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(SCREAMING_SNAKE_CASE__ )
def ideal_atom_mask(prot: Protein ) -> np.ndarray:
    '''simple docstring'''
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(features: FeatureDict , result: ModelOutput , b_factors: Optional[np.ndarray] = None , chain_index: Optional[np.ndarray] = None , remark: Optional[str] = None , parents: Optional[Sequence[str]] = None , parents_chain_index: Optional[Sequence[int]] = None , ) -> Protein:
    '''simple docstring'''
    return Protein(
        aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=chain_index , remark=remark , parents=parents , parents_chain_index=parents_chain_index , )
| 7 |
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str , map_location: str = "cpu" , save_path: Union[str, None] = None ) -> None:
    '''simple docstring'''
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        state_dict[k] = v.half()
    if save_path is None: # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
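# Illustrative invocation via `fire` (the script name and paths are placeholders):
#   python convert_to_fp16.py path/to/pytorch_model.bin --save_path model.fp16.bin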
if __name__ == "__main__":
fire.Fire(convert)
| 7 | 1 |
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list ):
    def __lt__(self, other ):
        return self[-1] < other[-1]
    def __eq__(self, other ):
        return self[-1] == other[-1]
def patience_sort(collection: list ) -> list:
    """simple docstring"""
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stack = Stack([element] )
        i = bisect_left(stacks , new_stack )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_stack )
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack ) for stack in stacks) )
    return collection
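# Worked example: patience_sort([4, 1, 3, 2]) first deals the elements into the
# decreasing stacks [4, 1] and [3, 2]; merging the reversed stacks [1, 4] and
# [2, 3] with a heap then yields [1, 2, 3, 4].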
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
| 363 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    """simple docstring"""
    config = XLNetConfig.from_json_file(bert_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else """"""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F'Building PyTorch XLNetForSequenceClassification model from configuration: {config}' )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'Save configuration file to {os.path.abspath(pytorch_config_dump_path )}' )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 82 | 0 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCAmelCase = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCAmelCase = '''main'''
# Default branch name
lowerCAmelCase = '''f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'''
# One particular commit (not the top of `main`)
lowerCAmelCase = '''aaaaaaa'''
# This commit does not exist, so we should 404.
lowerCAmelCase = '''d9e9f15bc825e4b2c9249e9578f884bbcb5e3684'''
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCAmelCase = '''4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'''
@contextlib.contextmanager
def _a ( ):
"""simple docstring"""
print('''Welcome!''' )
yield
print('''Bye!''' )
@contextlib.contextmanager
def _a ( ):
"""simple docstring"""
print('''Bonjour!''' )
yield
print('''Au revoir!''' )
class TestImportMechanisms(unittest.TestCase ):
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
assert transformers.__spec__ is not None
assert importlib.util.find_spec('''transformers''' ) is not None
class GenericUtilTests(unittest.TestCase ):
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_no_context( self , mock_stdout ):
"""simple docstring"""
with ContextManagers([] ):
print('''Transformers are awesome!''' )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , '''Transformers are awesome!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_one_context( self , mock_stdout ):
"""simple docstring"""
with ContextManagers([context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Welcome!\nTransformers are awesome!\nBye!\n''' )
@unittest.mock.patch('''sys.stdout''' , new_callable=io.StringIO )
    def test_context_managers_two_context( self , mock_stdout ):
"""simple docstring"""
with ContextManagers([context_fr(), context_en()] ):
print('''Transformers are awesome!''' )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , '''Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n''' )
    @require_torch
    def test_find_labels_pt( self ):
        """simple docstring"""
        self.assertEqual(find_labels(BertForSequenceClassification ) , ['''labels'''] )
        self.assertEqual(find_labels(BertForPreTraining ) , ['''labels''', '''next_sentence_label'''] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ['''start_positions''', '''end_positions'''] )
        class DummyModel(BertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ['''labels'''] )
    @require_tf
    def test_find_labels_tf( self ):
        """simple docstring"""
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ['''labels'''] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ['''labels''', '''next_sentence_label'''] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ['''start_positions''', '''end_positions'''] )
        class DummyModel(TFBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ['''labels'''] )
    @require_flax
    def test_find_labels_flax( self ):
        """simple docstring"""
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
        class DummyModel(FlaxBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , [] )
| 110 |
import csv
import tweepy
# Twitter API credentials
consumer_key = ''''''
consumer_secret = ''''''
access_key = ''''''
access_secret = ''''''
def get_all_tweets(screen_name: str ) -> None:
    """simple docstring"""
    auth = tweepy.OAuthHandler(consumer_key , consumer_secret )
    auth.set_access_token(access_key , access_secret )
    api = tweepy.API(auth )
    # initialize a list to hold all the tweepy Tweets
    alltweets = []
    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name , count=200 )
    # save most recent tweets
    alltweets.extend(new_tweets )
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets ) > 0:
        print(F"""getting tweets before {oldest}""" )
        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(
            screen_name=screen_name , count=200 , max_id=oldest )
        # save most recent tweets
        alltweets.extend(new_tweets )
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(F"""...{len(alltweets )} tweets downloaded so far""" )
    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]
    # write the csv
    with open(F"""new_{screen_name}_tweets.csv""" , '''w''' ) as f:
        writer = csv.writer(f )
        writer.writerow(['''id''', '''created_at''', '''text'''] )
        writer.writerows(outtweets )
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets('''FirePing32''')
| 313 | 0 |
'''simple docstring'''
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/blenderbot_small-90M': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer
    def __init__( self , vocab_file=None , merges_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file , merges=merges_file , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , ) , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , **kwargs , )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
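# Hedged sketch of the special-token layout produced above (x/y stand for the
# token ids of the first and second sequence):
#   single sequence: [bos] x1 ... xn [eos]
#   pair:            [bos] x1 ... xn [eos] [eos] y1 ... ym [eos]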
| 72 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput ):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    '''simple docstring'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F"Unsupported alpha_transform_type: {alpha_transform_type}" )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ), max_beta ) )
    return torch.tensor(betas, dtype=torch.float32 )
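# Illustrative check of the cosine schedule above (num_diffusion_timesteps=2):
#   beta_0 = min(1 - alpha_bar(0.5) / alpha_bar(0.0), 0.999)
#   beta_1 = min(1 - alpha_bar(1.0) / alpha_bar(0.5), 0.999)
# alpha_bar is decreasing on [0, 1], so each ratio is < 1 and every beta lands
# in (0, max_beta].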
class UnCLIPScheduler(SchedulerMixin , ConfigMixin ):
@register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , variance_type: str = "fixed_small_log" , clip_sample: bool = True , clip_sample_range: Optional[float] = 1.0 , prediction_type: str = "epsilon" , beta_schedule: str = "squaredcos_cap_v2" , ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError('''UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'''' )
        self.betas = betas_for_alpha_bar(num_train_timesteps )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        self.one = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0 , num_train_timesteps )[::-1].copy() )
        self.variance_type = variance_type
    def scale_model_input( self , sample: torch.FloatTensor , timestep: Optional[int] = None ):
        return sample
    def set_timesteps( self , num_inference_steps: int , device: Union[str, torch.device] = None ):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(np.int64 )
        self.timesteps = torch.from_numpy(timesteps ).to(device )
    def _get_variance( self , t , prev_timestep=None , predicted_variance=None , variance_type=None ):
if prev_timestep is None:
snake_case_ = t - 1
snake_case_ = self.alphas_cumprod[t]
snake_case_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
snake_case_ = 1 - alpha_prod_t
snake_case_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
snake_case_ = self.betas[t]
else:
snake_case_ = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
snake_case_ = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
snake_case_ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
snake_case_ = torch.log(torch.clamp(lowercase_ , min=1e-20 ) )
snake_case_ = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
snake_case_ = variance.log()
snake_case_ = beta.log()
snake_case_ = (predicted_variance + 1) / 2
snake_case_ = frac * max_log + (1 - frac) * min_log
return variance
    def step( self , model_output: torch.FloatTensor , timestep: int , sample: torch.FloatTensor , prev_timestep: Optional[int] = None , generator=None , return_dict: bool = True , ):
snake_case_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
snake_case_ ,snake_case_ = torch.split(lowercase_ , sample.shape[1] , dim=1 )
else:
snake_case_ = None
# 1. compute alphas, betas
if prev_timestep is None:
snake_case_ = t - 1
snake_case_ = self.alphas_cumprod[t]
snake_case_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
snake_case_ = 1 - alpha_prod_t
snake_case_ = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
snake_case_ = self.betas[t]
snake_case_ = self.alphas[t]
else:
snake_case_ = 1 - alpha_prod_t / alpha_prod_t_prev
snake_case_ = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
snake_case_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
snake_case_ = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
''' for the UnCLIPScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
snake_case_ = torch.clamp(
lowercase_ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case_ = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
snake_case_ = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
snake_case_ = 0
if t > 0:
snake_case_ = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=lowercase_ , device=model_output.device )
snake_case_ = self._get_variance(
lowercase_ , predicted_variance=lowercase_ , prev_timestep=lowercase_ , )
if self.variance_type == "fixed_small_log":
snake_case_ = variance
elif self.variance_type == "learned_range":
snake_case_ = (0.5 * variance).exp()
else:
raise ValueError(
F"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
''' for the UnCLIPScheduler.''' )
snake_case_ = variance * variance_noise
snake_case_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=lowercase_ , pred_original_sample=lowercase_ )
    def add_noise( self , original_samples: torch.FloatTensor , noise: torch.FloatTensor , timesteps: torch.IntTensor , ):
# Make sure alphas_cumprod and timestep have same device and dtype as original_samples
snake_case_ = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
snake_case_ = timesteps.to(original_samples.device )
snake_case_ = alphas_cumprod[timesteps] ** 0.5
snake_case_ = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
snake_case_ = sqrt_alpha_prod.unsqueeze(-1 )
snake_case_ = (1 - alphas_cumprod[timesteps]) ** 0.5
snake_case_ = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
snake_case_ = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
snake_case_ = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
| 72 | 1 |
'''simple docstring'''
def nor_gate(input_1: int , input_2: int ) -> int:
    '''simple docstring'''
    return int(input_1 == input_2 == 0 )
def main() -> None:
'''simple docstring'''
print("""Truth Table of NOR Gate:""" )
print("""| Input 1 | Input 2 | Output |""" )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 346 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("""Temporarily disable the doc tests.""" )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests on every eligible file in `directory`."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling_files(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_files(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_source_files(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_doc_files(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 346 | 1 |
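The test class above drives the standard-library doctest machinery through unittest. A self-contained sketch of that core mechanism, mirroring the `only_modules` branch of `analyze_directory` (the module choice is ours; any module whose docstrings contain doctests works):

import doctest
import json
import unittest

def run_module_doctests(module) -> bool:
    # Collect the module's doctests into a unittest suite and run them.
    suite = doctest.DocTestSuite(module)
    result = unittest.TextTestRunner().run(suite)
    return len(result.failures) == 0

print(run_module_doctests(json))  # json's module docstring contains doctest examples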
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), optionally with add-one smoothing."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine a term frequency with an inverse document frequency."""
    return round(tf * idf, 3)
| 360 |
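A quick usage sketch of the four TF-IDF helpers, under the names restored above; the toy corpus is our own:

corpus = "this is the first document\nthis document is the second document\nand this is the third one"

tf = term_frequency("document", corpus.split("\n")[1])  # 2 occurrences in the second document
df, n = document_frequency("document", corpus)          # (2, 3): 2 of the 3 documents contain it
idf = inverse_document_frequency(df, n)                 # round(log10(3 / 2), 3) == 0.176
print(tf_idf(tf, idf))                                  # 0.352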
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Zero-shot audio classification pipeline: scores an audio input against
    an arbitrary set of candidate text labels."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        """Assign labels to the audio(s) passed as inputs."""
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 69 | 0 |
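A hedged usage sketch for the pipeline above; the checkpoint name and the audio file are our assumptions (any CLAP-style audio-text checkpoint would do):

from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
preds = classifier(
    "dog_bark.wav",  # a URL or raw bytes also work, per preprocess() above
    candidate_labels=["dog barking", "rain", "human speech"],
)
print(preds)  # [{'score': ..., 'label': ...}, ...] sorted by descending score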
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst, dtype=torch.long, device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 39 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
                 intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02,
                 layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0,
                 activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 180 | 0 |
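A short instantiation sketch for the config class above; the shrunken hyperparameters are our own, chosen for a fast smoke test:

from transformers import BioGptConfig, BioGptForCausalLM

config = BioGptConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4, intermediate_size=128)
model = BioGptForCausalLM(config)      # randomly initialized, not pretrained
print(model.config.scale_embedding)    # True by default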
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.14.0""", """To fix: pip install -r examples/pytorch/audio-classification/requirements.txt""")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
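# random_subsample above is plain NumPy slicing, so it is easy to sanity check.
# The toy data below is our own; sample_rate defaults to 16000 as in the signature.
#
#     wav = np.zeros(16000 * 30)                                # 30 s of silence at 16 kHz
#     clip = random_subsample(wav, max_length=20.0)             # always exactly 20 s long
#     assert len(clip) == 16000 * 20
#     assert len(random_subsample(wav[:100], 20.0)) == 100      # short clips pass through unchanged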
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    feature_extractor_name: Optional[str] = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
# Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None,
    )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """
'Make sure to set `--label_column_name` to the correct text column - one of '
F"""{', '.join(raw_datasets['train'].column_names )}.""" )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )
    model_input_name = feature_extractor.model_input_names[0]
    def train_transforms(batch):
        """Apply train-time transforms (random subsampling) across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply validation transforms (no subsampling) across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Compute accuracy on a batch of predictions."""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 260 |
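The script above is driven entirely by its three argument dataclasses. A hedged sketch of exercising its parser programmatically; the flag values below are our own example choices, not taken from the sample:

from transformers import HfArgumentParser, TrainingArguments

parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(
    ["--model_name_or_path", "facebook/wav2vec2-base",
     "--dataset_name", "superb", "--dataset_config_name", "ks",
     "--output_dir", "./out", "--do_train", "--do_eval"]
)
print(data_args.max_length_seconds)  # 20, the default defined above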
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
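# The rolling-hash update above can be checked by hand on a two-character window
# (our own worked example; the modulus is omitted for clarity, and here
# modulus_power == alphabet_size ** (p_len - 1) == 256):
#
#     h_ab = ord("a") * 256 + ord("b")                  # hash("ab")
#     h_bc = (h_ab - ord("a") * 256) * 256 + ord("c")   # slide the window by one character
#     assert h_bc == ord("b") * 256 + ord("c")          # equals hash("bc")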
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 260 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}
class GPTSanJapaneseConfig(PretrainedConfig):
    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, vocab_size=36000, max_position_embeddings=1280, d_model=1024, d_ff=8192, d_ext=4096,
                 d_spout=128, num_switch_layers=10, num_ext_layers=0, num_heads=16, num_experts=16,
                 expert_capacity=128, dropout_rate=0.0, layer_norm_epsilon=1e-5, router_bias=False,
                 router_jitter_noise=0.0, router_dtype="float32", router_ignore_padding_tokens=False,
                 output_hidden_states=False, output_attentions=False, initializer_factor=0.002,
                 output_router_logits=False, use_cache=True, separator_token_id=35998, pad_token_id=35995,
                 eos_token_id=35999, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id, pad_token_id=pad_token_id, eos_token_id=eos_token_id, **kwargs,
        )
| 85 |
'''simple docstring'''
from math import sqrt
def is_prime(number):
    """Return True if 'number' is prime, via trial division up to sqrt(number)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must be from type bool"
    return status
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_a = list(range(2 , n + 1))
_a = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(__A)):
for j in range(i + 1 , len(__A)):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_a = 0
# filters actual prime numbers.
_a = [x for x in begin_list if x != 0]
# precondition
assert isinstance(__A , __A), "'ans' must been from type list"
return ans
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and (n > 2), "'N' must been an int and > 2"
_a = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1):
if is_prime(__A):
ans.append(__A)
# precondition
assert isinstance(__A , __A), "'ans' must been from type list"
return ans
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and number >= 0, "'number' must been an int and >= 0"
_a = [] # this list will be returns of the function.
# potential prime number factors.
_a = 2
_a = number
if number == 0 or number == 1:
ans.append(__A)
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(__A):
while quotient != 1:
if is_prime(__A) and (quotient % factor == 0):
ans.append(__A)
quotient /= factor
else:
factor += 1
else:
ans.append(__A)
# precondition
assert isinstance(__A , __A), "'ans' must been from type list"
return ans
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and (
number >= 0
), "'number' bust been an int and >= 0"
_a = 0
# prime factorization of 'number'
_a = prime_factorization(__A)
_a = max(__A)
# precondition
assert isinstance(__A , __A), "'ans' must been from type int"
return ans
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and (
number >= 0
), "'number' bust been an int and >= 0"
_a = 0
# prime factorization of 'number'
_a = prime_factorization(__A)
_a = min(__A)
# precondition
assert isinstance(__A , __A), "'ans' must been from type int"
return ans
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A), "'number' must been an int"
assert isinstance(number % 2 == 0 , __A), "compare bust been from type bool"
return number % 2 == 0
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A), "'number' must been an int"
assert isinstance(number % 2 != 0 , __A), "compare bust been from type bool"
return number % 2 != 0
def lowerCAmelCase (__A):
"""simple docstring"""
assert (
isinstance(__A , __A) and (number > 2) and is_even(__A)
), "'number' must been an int, even and > 2"
_a = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
_a = get_prime_numbers(__A)
_a = len(__A)
# run variable for while-loops.
_a = 0
_a = None
# exit variable. for break up the loops
_a = True
while i < len_pn and loop:
_a = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_a = False
ans.append(prime_numbers[i])
ans.append(prime_numbers[j])
j += 1
i += 1
# precondition
assert (
isinstance(__A , __A)
and (len(__A) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0])
and is_prime(ans[1])
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def gcd(number1, number2):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must be from type int and positive"
    return number1
def lowerCAmelCase (__A , __A):
"""simple docstring"""
assert (
isinstance(__A , __A)
and isinstance(__A , __A)
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
_a = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_a = prime_factorization(__A)
_a = prime_factorization(__A)
elif numbera == 1 or numbera == 1:
_a = []
_a = []
_a = max(__A , __A)
_a = 0
_a = 0
_a = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_a = prime_fac_a.count(__A)
_a = prime_fac_a.count(__A)
for _ in range(max(__A , __A)):
ans *= n
else:
_a = prime_fac_a.count(__A)
for _ in range(__A):
ans *= n
done.append(__A)
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_a = prime_fac_a.count(__A)
for _ in range(__A):
ans *= n
done.append(__A)
# precondition
assert isinstance(__A , __A) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and (n >= 0), "'number' must been a positive int"
_a = 0
_a = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(__A):
ans += 1
# precondition
assert isinstance(__A , __A) and is_prime(
__A), "'ans' must been a prime number and from type int"
return ans
def lowerCAmelCase (__A , __A):
"""simple docstring"""
assert (
is_prime(__A) and is_prime(__A) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_a = p_number_a + 1 # jump to the next number
_a = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(__A):
number += 1
while number < p_number_a:
ans.append(__A)
number += 1
# fetch the next prime number.
while not is_prime(__A):
number += 1
# precondition
assert (
isinstance(__A , __A)
and ans[0] != p_number_a
and ans[len(__A) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and (n >= 1), "'n' must been int and >= 1"
_a = [] # will be returned.
for divisor in range(1 , n + 1):
if n % divisor == 0:
ans.append(__A)
# precondition
assert ans[0] == 1 and ans[len(__A) - 1] == n, "Error in function getDivisiors(...)"
return ans
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and (
number > 1
), "'number' must been an int and >= 1"
_a = get_divisors(__A)
# precondition
assert (
isinstance(__A , __A)
and (divisors[0] == 1)
and (divisors[len(__A) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1]) == number
def lowerCAmelCase (__A , __A):
"""simple docstring"""
assert (
isinstance(__A , __A)
and isinstance(__A , __A)
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_a = gcd(abs(__A) , abs(__A))
# precondition
assert (
isinstance(__A , __A)
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and (n >= 0), "'n' must been a int and >= 0"
_a = 1 # this will be return.
for factor in range(1 , n + 1):
ans *= factor
return ans
def lowerCAmelCase (__A):
"""simple docstring"""
assert isinstance(__A , __A) and (n >= 0), "'n' must been an int and >= 0"
_a = 0
_a = 1
_a = 1 # this will be return
for _ in range(n - 1):
_a = ans
ans += fiba
_a = tmp
return ans
| 211 | 0 |
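Most helpers in the block above are classical number theory. One identity ties its two central routines together: gcd(a, b) * lcm(a, b) == a * b, which is exactly how the sample's kgV (lcm) helper could be written. A hedged standalone sketch:

from math import gcd

def lcm(a: int, b: int) -> int:
    # Least common multiple via the gcd identity.
    return a * b // gcd(a, b)

assert gcd(24, 36) == 12 and lcm(24, 36) == 72
assert gcd(24, 36) * lcm(24, 36) == 24 * 36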
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
                 num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
                 max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
                 num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
                 eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 169 |
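A quick instantiation sketch for the config above; the shrunken values are our own, chosen for a smoke test:

from transformers import RealmConfig, RealmEmbedder

config = RealmConfig(num_hidden_layers=2, num_candidates=4)
embedder = RealmEmbedder(config)      # randomly initialized retriever embedder
print(config.retriever_proj_size)     # 128: dimension of the retrieval projection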
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True,
        vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id,
        )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , *UpperCAmelCase_ : List[str] ):
SCREAMING_SNAKE_CASE__ = OpenAIGPTModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(
        self, config, input_ids, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()

        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # (the spelling of `pipeline_test_casse_name` matches the upstream hook it overrides)
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices),
                    dtype=torch.long,
                    device=torch_device,
                )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)

    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)

    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the

        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
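# --- Editorial addition: a small sketch, not part of the original test file. ---
# GPT-style tokenizers ship without a padding token, which is why the tester
# above sets pad_token_id = vocab_size - 1 before running the sequence
# classification check: the model pools the logit of the last non-padding token
# in each row. The ids below are toy values chosen for illustration.
import torch

pad_token_id = 98  # vocab_size=99 in the tester, so the pad id is vocab_size - 1
input_ids = torch.tensor([[5, 7, pad_token_id], [3, pad_token_id, pad_token_id]])
sequence_lengths = torch.ne(input_ids, pad_token_id).sum(-1) - 1  # index of last real token
assert sequence_lengths.tolist() == [1, 0]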
| 169 | 1 |
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    """Return a (numerator, denominator) pair equivalent to the given decimal."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Reduce the fraction with the Euclidean algorithm (greatest common divisor).
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"{decimal_to_fraction(2) = }")
    print(f"{decimal_to_fraction(89.0) = }")
    print(f"{decimal_to_fraction('67') = }")
    print(f"{decimal_to_fraction('45.0') = }")
    print(f"{decimal_to_fraction(1.5) = }")
    print(f"{decimal_to_fraction('6.25') = }")
    print(f"{decimal_to_fraction('78td') = }")  # deliberately raises ValueError
| 147 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json",
    },
    "added_tokens.json": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json",
    },
    "merges_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "RUCAIBox/mvp": 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
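# --- Editorial addition: an illustration, not part of the original module. ---
# MVP reuses the BART/RoBERTa special-token layout produced above:
# single sequence -> <s> A </s>; pair -> <s> A </s> </s> B </s>, all type id 0.
# The ids below are hypothetical stand-ins for bos_token_id / eos_token_id.
bos, eos = 0, 2
a_ids, b_ids = [10, 11], [20]
pair = [bos] + a_ids + [eos] + [eos] + b_ids + [eos]
assert pair == [0, 10, 11, 2, 2, 20, 2]
token_type_ids = len(pair) * [0]  # what create_token_type_ids_from_sequences returns
assert token_type_ids == [0, 0, 0, 0, 0, 0, 0]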
| 147 | 1 |
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Wrap a function so every call warns that the API may change."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
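# --- Editorial addition: usage sketch, not part of the original module. ---
# Applying the decorator warns on every call and, thanks to functools.wraps,
# keeps the wrapped function's metadata intact.
@experimental
def beta_feature(x: int) -> int:
    return x * 2

assert beta_feature(3) == 6  # emits: 'beta_feature' is experimental ...
assert beta_feature.__name__ == "beta_feature"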
| 297 |
import sys


def matrix_chain_order(array):
    """Fill the DP cost table for multiplying a chain of matrices whose
    dimensions are given by `array` (matrix i is array[i-1] x array[i])."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    """Print the optimal parenthesization, with Ai naming the i-th matrix."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
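# --- Editorial addition: a quick correctness check, not part of the original. ---
# For the classic CLRS dimensions used in main(), the minimal number of scalar
# multiplications is known to be 15125, i.e. matrix[1][n - 1] after the DP fill.
dims = [30, 35, 15, 5, 10, 20, 25]
costs, _ = matrix_chain_order(dims)
assert costs[1][len(dims) - 1] == 15125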
| 297 | 1 |
def check_cycle(graph: dict) -> bool:
    """Return True if the given directed graph contains a cycle."""
    # Keep track of all visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 280 |
def check_cycle(graph: dict) -> bool:
    """Return True if the given directed graph contains a cycle."""
    # Keep track of all visited nodes
    visited = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur for all neighbours; a neighbour already on the stack is a back edge."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
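# --- Editorial addition: usage sketch, not part of the original snippet. ---
# Graphs are plain dicts mapping a node to its outgoing neighbours.
cyclic = {0: [1], 1: [2], 2: [0]}     # 0 -> 1 -> 2 -> 0 closes a cycle
acyclic = {0: [1, 2], 1: [2], 2: []}  # a DAG: no back edge exists
assert check_cycle(cyclic)
assert not check_cycle(acyclic)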
| 117 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
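# --- Editorial addition: a round-trip sketch using the same public accelerate
# utilities exercised by the tests above; assumes only their documented use. ---
import torch
from tempfile import TemporaryDirectory
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

sd = {"w": torch.randn(2, 2)}
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, sd)                       # writes w.dat + index.json
    loader = OffloadedWeightsLoader(save_folder=tmp_dir)  # lazy, memory-mapped access
    assert torch.equal(loader["w"], sd["w"])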
| 93 |
def power(base: int, exponent: int):
    """Recursively compute base**exponent for a non-negative exponent."""
    return base * power(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print("Raise base to the power of exponent using recursion...")
    base = int(input("Enter the base: ").strip())
    exponent = int(input("Enter the exponent: ").strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(f"{base} to the power of {exponent} is {result}")
| 93 | 1 |
"""Compute attention-head entropy and importance scores for a GPT-2 style
language model, then optionally mask and prune the least important heads."""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Log a 2D tensor, one line per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores (Michel et al., 2019)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively zero out the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the masked heads' weights and measure the speed/score effect."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written."
    )

    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3"
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1"
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)."
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
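# --- Editorial addition: a commented sketch of the pruning primitive the script
# relies on; it needs a model download, so it is illustrative only. ---
# from transformers import GPT2LMHeadModel
# model = GPT2LMHeadModel.from_pretrained("gpt2")
# model.prune_heads({0: [0, 2], 5: [1]})  # drop heads 0 and 2 of layer 0, head 1 of layer 5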
| 185 |
"""
Count the inversions of an array A[0..n-1]: pairs (i, j) with i < j but
A[i] > A[j]. A brute-force counter and a divide-and-conquer counter are
implemented and cross-checked in main().
"""


def count_inversions_bf(arr):
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    r = []
    i = j = 0
    num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
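# --- Editorial addition: a property check, not part of the original snippet. ---
# The divide-and-conquer counter runs in O(n log n) versus O(n^2) for the brute
# force; on random input both must agree.
import random

sample = [random.randint(0, 100) for _ in range(200)]
assert count_inversions_bf(sample) == count_inversions_recursive(sample)[1]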
| 185 | 1 |
"""Testing utilities: a tiny regression dataset/model pair and mocked MRPC dataloaders."""
import numpy as np
import torch
from torch.utils.data import DataLoader

from accelerate.utils.dataclasses import DistributedType


class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 96 |
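# --- Editorial addition: usage sketch, not part of the original module. ---
# The regression pair gives a dependency-light way to smoke-test training loops:
# y is generated as a * x + b plus noise, so a fitted model should recover
# a ~ 2 and b ~ 3 with the default dataset parameters.
import torch

ds = RegressionDataset(length=4, seed=42)
model = RegressionModel(a=2, b=3)
pred = model(torch.tensor(ds[0]["x"]))  # prints parameter/input dtypes once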
"""Tests for BertJapaneseTokenizer and its word/subword tokenizers."""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def lowercase_ ( self : Optional[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
pass # TODO add if relevant
def lowercase_ ( self : Dict ):
pass # TODO add if relevant
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file )
a : Optional[int] = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。' )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def lowercase_ ( self : Union[str, Any] ):
a : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab' )
self.assertIsNotNone(__snake_case )
a : List[str] = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[int] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[Any] = pickle.load(__snake_case )
a : Tuple = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowercase_ ( self : Dict ):
a : List[str] = MecabTokenizer(mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : List[Any] ):
try:
a : int = MecabTokenizer(mecab_dic='unidic_lite' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Any ):
try:
a : Union[str, Any] = MecabTokenizer(mecab_dic='unidic' )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : str ):
a : Tuple = MecabTokenizer(do_lower_case=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def lowercase_ ( self : Union[str, Any] ):
try:
a : Any = MecabTokenizer(
do_lower_case=__snake_case , normalize_text=__snake_case , mecab_option='-d /usr/local/lib/mecab/dic/jumandic' )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def lowercase_ ( self : List[Any] ):
a : Dict = MecabTokenizer(normalize_text=__snake_case , mecab_dic='ipadic' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def lowercase_ ( self : str ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi' )
self.assertIsNotNone(__snake_case )
a : List[Any] = 'こんにちは、世界。\nこんばんは、世界。'
a : int = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Tuple = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : Optional[int] = pickle.load(__snake_case )
a : List[Any] = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_sudachi
def lowercase_ ( self : List[Any] ):
a : Optional[Any] = SudachiTokenizer(sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Any ):
a : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国', '人', '参政', '権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Optional[int] = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人', '参政権'] )
@require_sudachi
def lowercase_ ( self : Optional[Any] ):
a : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C' )
self.assertListEqual(tokenizer.tokenize('外国人参政権' ) , ['外国人参政権'] )
@require_sudachi
def lowercase_ ( self : Dict ):
a : Optional[int] = SudachiTokenizer(do_lower_case=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Tuple ):
a : int = SudachiTokenizer(normalize_text=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = SudachiTokenizer(trim_whitespace=__snake_case , sudachi_dict_type='core' )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[Any] ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp' )
self.assertIsNotNone(__snake_case )
a : str = 'こんにちは、世界。\nこんばんは、世界。'
a : Tuple = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
a : Optional[Any] = os.path.join(self.tmpdirname , 'tokenizer.bin' )
with open(__snake_case , 'wb' ) as handle:
pickle.dump(__snake_case , __snake_case )
with open(__snake_case , 'rb' ) as handle:
a : List[str] = pickle.load(__snake_case )
a : Any = tokenizer_new.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : Any = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : List[str] ):
a : List[Any] = JumanppTokenizer(do_lower_case=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : List[Any] = JumanppTokenizer(normalize_text=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def lowercase_ ( self : Any ):
a : str = JumanppTokenizer(trim_whitespace=__snake_case )
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ' ) , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def lowercase_ ( self : Tuple ):
a : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。' ) , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def lowercase_ ( self : Any ):
a : int = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
a : Optional[int] = {}
for i, token in enumerate(__snake_case ):
a : Dict = i
a : Optional[Any] = WordpieceTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こんにちは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは' ) , ['こん', '##ばんは'] )
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは' ) , ['こん', '##ばんは', '[UNK]', 'こんにちは'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp' )
a : List[Any] = tokenizer.subword_tokenizer
a : List[str] = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。' )
self.assertListEqual(__snake_case , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'] )
a : Union[str, Any] = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは' )
self.assertListEqual(__snake_case , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'] )
def lowercase_ ( self : Union[str, Any] ):
a : Optional[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese' )
a : Dict = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : str = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def lowercase_ ( self : str ):
pass # TODO add if relevant
def lowercase_ ( self : List[str] ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
pass # TODO add if relevant
def lowercase_ ( self : Any ):
a : Optional[int] = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character' )
a : Tuple = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。' )
self.assertListEqual(
__snake_case , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__snake_case ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def lowercase_ ( self : Any ):
a : Union[str, Any] = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : Optional[Any] = {}
for i, token in enumerate(__snake_case ):
a : Tuple = i
a : Optional[int] = CharacterTokenizer(vocab=__snake_case , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('こんにちは' ) , ['こ', 'ん', 'に', 'ち', 'は'] )
self.assertListEqual(tokenizer.tokenize('こんにちほ' ) , ['こ', 'ん', 'に', 'ち', '[UNK]'] )
def lowercase_ ( self : Tuple ):
a : List[Any] = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char' )
a : Optional[int] = tokenizer.encode('ありがとう。' , add_special_tokens=__snake_case )
a : List[str] = tokenizer.encode('どういたしまして。' , add_special_tokens=__snake_case )
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(__snake_case )
a : Dict = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class a__( unittest.TestCase ):
def lowercase_ ( self : Union[str, Any] ):
a : List[str] = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
a : Dict = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING' ) as cm:
BertJapaneseTokenizer.from_pretrained(__snake_case )
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.' ) )
| 96 | 1 |
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Union[str, Any] = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(__lowercase , '''embed_dim'''))
self.parent.assertTrue(hasattr(__lowercase , '''num_heads'''))
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=64 , __lowercase=3 , __lowercase=[16, 48, 96] , __lowercase=[1, 3, 6] , __lowercase=[1, 2, 10] , __lowercase=[7, 3, 3] , __lowercase=[4, 2, 2] , __lowercase=[2, 1, 1] , __lowercase=[2, 2, 2] , __lowercase=[False, False, True] , __lowercase=[0.0, 0.0, 0.0] , __lowercase=0.02 , __lowercase=1E-1_2 , __lowercase=True , __lowercase=True , __lowercase=2 , ) -> Any:
__UpperCamelCase :Optional[Any] = parent
__UpperCamelCase :Tuple = batch_size
__UpperCamelCase :List[Any] = image_size
__UpperCamelCase :str = patch_sizes
__UpperCamelCase :Optional[Any] = patch_stride
__UpperCamelCase :int = patch_padding
__UpperCamelCase :int = is_training
__UpperCamelCase :Optional[Any] = use_labels
__UpperCamelCase :str = num_labels
__UpperCamelCase :Tuple = num_channels
__UpperCamelCase :Optional[Any] = embed_dim
__UpperCamelCase :List[Any] = num_heads
__UpperCamelCase :Dict = stride_kv
__UpperCamelCase :Optional[Any] = depth
__UpperCamelCase :Any = cls_token
__UpperCamelCase :Dict = attention_drop_rate
__UpperCamelCase :int = initializer_range
__UpperCamelCase :str = layer_norm_eps
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCamelCase :int = None
if self.use_labels:
# create a random int32 tensor of given shape
__UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.num_labels)
__UpperCamelCase :Dict = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self) -> Optional[int]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> Dict:
__UpperCamelCase :Any = TFCvtModel(config=__lowercase)
__UpperCamelCase :Optional[int] = model(__lowercase , training=__lowercase)
__UpperCamelCase :List[Any] = (self.image_size, self.image_size)
__UpperCamelCase , __UpperCamelCase :Tuple = image_size[0], image_size[1]
for i in range(len(self.depth)):
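# standard convolution output-size formula, applied once per stage:
# floor((input + 2 * padding - kernel) / stride) + 1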
__UpperCamelCase :Tuple = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
__UpperCamelCase :int = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> str:
__UpperCamelCase :List[str] = self.num_labels
__UpperCamelCase :Any = TFCvtForImageClassification(__lowercase)
__UpperCamelCase :int = model(__lowercase , labels=__lowercase , training=__lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[str] = self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[str] = config_and_inputs
__UpperCamelCase :List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : str = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
a__ : str = (
{"""feature-extraction""": TFCvtModel, """image-classification""": TFCvtForImageClassification}
if is_tf_available()
else {}
)
a__ : List[str] = False
a__ : str = False
a__ : List[Any] = False
a__ : List[Any] = False
a__ : Optional[int] = False
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :List[str] = TFCvtModelTester(self)
__UpperCamelCase :List[str] = TFCvtConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase , hidden_size=37)
def UpperCamelCase__ ( self) -> Union[str, Any]:
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason='''Cvt does not output attentions''')
def UpperCamelCase__ ( self) -> Optional[Any]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''')
def UpperCamelCase__ ( self) -> Any:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''')
def UpperCamelCase__ ( self) -> str:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
def UpperCamelCase__ ( self) -> int:
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('''GPU''')) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , )
@slow
def UpperCamelCase__ ( self) -> List[str]:
super().test_keras_fit()
@unittest.skip(reason='''Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8''')
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :int = tf.keras.mixed_precision.Policy('''mixed_float16''')
tf.keras.mixed_precision.set_global_policy(__lowercase)
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy('''float32''')
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase , __UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase :Dict = model_class(__lowercase)
__UpperCamelCase :Dict = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase :Dict = [*signature.parameters.keys()]
__UpperCamelCase :Optional[int] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase)
def UpperCamelCase__ ( self) -> str:
def check_hidden_states_output(__lowercase , __lowercase , __lowercase):
__UpperCamelCase :Tuple = model_class(__lowercase)
__UpperCamelCase :Optional[int] = model(**self._prepare_for_class(__lowercase , __lowercase))
__UpperCamelCase :List[Any] = outputs.hidden_states
__UpperCamelCase :Any = len(self.model_tester.depth)
self.assertEqual(len(__lowercase) , __lowercase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
__UpperCamelCase , __UpperCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase :int = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCamelCase :Optional[Any] = True
check_hidden_states_output(__lowercase , __lowercase , __lowercase)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowercase)
@slow
def UpperCamelCase__ ( self) -> Any:
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase :Optional[int] = TFCvtModel.from_pretrained(__lowercase)
self.assertIsNotNone(__lowercase)
def lowerCamelCase ( ):
'''simple docstring'''
__UpperCamelCase :str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self) -> Dict:
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
@slow
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :List[str] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
__UpperCamelCase :Optional[Any] = self.default_image_processor
__UpperCamelCase :Tuple = prepare_img()
__UpperCamelCase :Dict = image_processor(images=__lowercase , return_tensors='''tf''')
# forward pass
__UpperCamelCase :str = model(**__lowercase)
# verify the logits
__UpperCamelCase :List[str] = tf.TensorShape((1, 1_000))
self.assertEqual(outputs.logits.shape , __lowercase)
__UpperCamelCase :Dict = tf.constant([0.92_85, 0.90_15, -0.31_50])
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , __lowercase , atol=1E-4))
| 43 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase_ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
a__ : int = StableUnCLIPImgaImgPipeline
a__ : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
a__ : Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
a__ : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a__ : int = frozenset([] )
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :Tuple = 32
__UpperCamelCase :Optional[int] = embedder_hidden_size
# image encoding components
__UpperCamelCase :Union[str, Any] = CLIPImageProcessor(crop_size=32 , size=32)
torch.manual_seed(0)
__UpperCamelCase :Union[str, Any] = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__lowercase , projection_dim=__lowercase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ))
# regular denoising components
torch.manual_seed(0)
__UpperCamelCase :str = StableUnCLIPImageNormalizer(embedding_dim=__lowercase)
__UpperCamelCase :Optional[int] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''')
torch.manual_seed(0)
__UpperCamelCase :Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
torch.manual_seed(0)
__UpperCamelCase :Dict = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__lowercase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ))
torch.manual_seed(0)
__UpperCamelCase :List[Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__lowercase , layers_per_block=1 , upcast_attention=__lowercase , use_linear_projection=__lowercase , )
torch.manual_seed(0)
__UpperCamelCase :Tuple = DDIMScheduler(
beta_schedule='''scaled_linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type='''v_prediction''' , set_alpha_to_one=__lowercase , steps_offset=1 , )
torch.manual_seed(0)
__UpperCamelCase :List[str] = AutoencoderKL()
__UpperCamelCase :Tuple = {
# image encoding components
'''feature_extractor''': feature_extractor,
'''image_encoder''': image_encoder.eval(),
# image noising components
'''image_normalizer''': image_normalizer.eval(),
'''image_noising_scheduler''': image_noising_scheduler,
# regular denoising components
'''tokenizer''': tokenizer,
'''text_encoder''': text_encoder.eval(),
'''unet''': unet.eval(),
'''scheduler''': scheduler,
'''vae''': vae.eval(),
}
return components
def UpperCamelCase__ ( self , __lowercase , __lowercase=0 , __lowercase=True) -> str:
if str(__lowercase).startswith('''mps'''):
__UpperCamelCase :Union[str, Any] = torch.manual_seed(__lowercase)
else:
__UpperCamelCase :int = torch.Generator(device=__lowercase).manual_seed(__lowercase)
__UpperCamelCase :int = floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase)).to(__lowercase)
if pil_image:
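# denormalize from [-1, 1] to [0, 1], clamp, move channels last, then convert to a PIL image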
__UpperCamelCase :List[Any] = input_image * 0.5 + 0.5
__UpperCamelCase :Optional[Any] = input_image.clamp(0 , 1)
__UpperCamelCase :int = input_image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
__UpperCamelCase :Optional[Any] = DiffusionPipeline.numpy_to_pil(__lowercase)[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase :Tuple = self.get_dummy_components()
__UpperCamelCase :Any = StableUnCLIPImgaImgPipeline(**__lowercase)
__UpperCamelCase :Optional[Any] = sd_pipe.to(__lowercase)
sd_pipe.set_progress_bar_config(disable=__lowercase)
__UpperCamelCase :List[Any] = self.get_dummy_inputs(__lowercase)
inputs.update({'''image_embeds''': None})
__UpperCamelCase :Any = sd_pipe(**__lowercase).images
__UpperCamelCase :List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase :List[Any] = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
self._test_attention_slicing_forward_pass(test_max_difference=__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Optional[Any] = torch_device in ['''cpu''', '''mps''']
self._test_inference_batch_single_identical(test_max_difference=__lowercase)
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def UpperCamelCase__ ( self) -> Union[str, Any]:
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__lowercase)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
__UpperCamelCase :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''')
__UpperCamelCase :List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa)
pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCamelCase :Dict = pipe(__lowercase , '''anime turtle''' , generator=__lowercase , output_type='''np''')
__UpperCamelCase :Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
__UpperCamelCase :Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''')
__UpperCamelCase :Any = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase :int = torch.Generator(device='''cpu''').manual_seed(0)
__UpperCamelCase :Optional[int] = pipe(__lowercase , '''anime turtle''' , generator=__lowercase , output_type='''np''')
__UpperCamelCase :List[Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__lowercase , __lowercase)
def UpperCamelCase__ ( self) -> List[str]:
__UpperCamelCase :Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''')
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__UpperCamelCase :List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa)
__UpperCamelCase :Union[str, Any] = pipe.to(__lowercase)
pipe.set_progress_bar_config(disable=__lowercase)
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__UpperCamelCase :Optional[Any] = pipe(
__lowercase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
__UpperCamelCase :int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 43 | 1 |
'''simple docstring'''
class UpperCAmelCase :
def __init__( self :Optional[Any] , lowercase_ :Dict , lowercase_ :Dict , lowercase_ :Union[str, Any] )-> Optional[Any]:
A__ = None
A__ = None
A__ = graph
self._normalize_graph(lowercase_ , lowercase_ )
A__ = len(lowercase_ )
A__ = None
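# _normalize_graph wraps scalar sources/sinks in lists and, when there are several,
# adds fake super-source/super-sink vertices whose capacity equals the total possible inflow.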
def UpperCAmelCase_ ( self :Tuple , lowercase_ :Dict , lowercase_ :Union[str, Any] )-> Union[str, Any]:
if isinstance(sources , int ):
A__ = [sources]
if isinstance(sinks , int ):
A__ = [sinks]
if len(lowercase_ ) == 0 or len(lowercase_ ) == 0:
return
A__ = sources[0]
A__ = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(lowercase_ ) > 1 or len(lowercase_ ) > 1:
A__ = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
A__ = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
A__ = max_input_flow
A__ = 0
A__ = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
A__ = max_input_flow
A__ = size - 1
def UpperCAmelCase_ ( self :int )-> Any:
if self.maximum_flow_algorithm is None:
raise Exception("You need to set maximum flow algorithm before." )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase_ ( self :int , lowercase_ :str )-> List[Any]:
A__ = algorithm(self )
class UpperCAmelCase :
def __init__( self :str , lowercase_ :Union[str, Any] )-> Union[str, Any]:
A__ = flow_network
A__ = flow_network.verticesCount
A__ = flow_network.sourceIndex
A__ = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
A__ = flow_network.graph
A__ = False
def UpperCAmelCase_ ( self :Optional[int] )-> Optional[Any]:
if not self.executed:
self._algorithm()
A__ = True
def UpperCAmelCase_ ( self :List[Any] )-> Optional[Any]:
pass
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :List[str] , lowercase_ :Optional[Any] )-> Optional[Any]:
super().__init__(lowercase_ )
# use this to save your result
A__ = -1
def UpperCAmelCase_ ( self :List[Any] )-> Optional[int]:
if not self.executed:
raise Exception("You should execute algorithm before using its result!" )
return self.maximum_flow
class UpperCAmelCase ( UpperCamelCase__ ):
def __init__( self :Tuple , lowercase_ :Optional[Any] )-> List[str]:
super().__init__(lowercase_ )
A__ = [[0] * self.verticies_count for i in range(self.verticies_count )]
A__ = [0] * self.verticies_count
A__ = [0] * self.verticies_count
def UpperCAmelCase_ ( self :Union[str, Any] )-> Optional[Any]:
A__ = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
A__ = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
A__ = 0
while i < len(lowercase_ ):
A__ = vertices_list[i]
A__ = self.heights[vertex_index]
self.process_vertex(lowercase_ )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(lowercase_ ) )
A__ = 0
else:
i += 1
A__ = sum(self.preflow[self.source_index] )
def UpperCAmelCase_ ( self :Any , lowercase_ :str )-> Dict:
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(lowercase_ , lowercase_ )
self.relabel(lowercase_ )
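# Push: send min(excess(u), residual capacity(u, v)) units of preflow along an
# admissible edge and update the excesses of both endpoints.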
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Optional[int] , lowercase_ :Any )-> Any:
A__ = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
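# Relabel: lift the vertex to one more than the height of its lowest-height
# neighbor that still has residual capacity, making a future push admissible.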
def UpperCAmelCase_ ( self :Tuple , lowercase_ :Dict )-> int:
A__ = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
A__ = self.heights[to_index]
if min_height is not None:
A__ = min_height + 1
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] =[0]
__lowerCAmelCase : Optional[int] =[3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
__lowerCAmelCase : str =[[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
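# The only augmenting path in this graph is 0 -> 1 -> 2 -> 3,
# so the maximum flow is min(7, 6, 8) = 6.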
# prepare our network
__lowerCAmelCase : Dict =FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
__lowerCAmelCase : Tuple =flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 123 |
'''simple docstring'''
def UpperCamelCase ( _lowerCamelCase : int ):
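"""
Return the `number`-th Catalan number (1-indexed: 1, 1, 2, 5, 14, ...),
computed iteratively via the recurrence C(i) = C(i - 1) * (4 * i - 2) // (i + 1).
>>> UpperCamelCase(1)
1
>>> UpperCamelCase(5)
14
"""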
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
A__ = F"Input value of [number={number}] must be an integer"
raise TypeError(_lowerCamelCase )
if number < 1:
A__ = F"Input value of [number={number}] must be > 0"
raise ValueError(_lowerCamelCase )
A__ = 1
for i in range(1 , _lowerCamelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 123 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _UpperCAmelCase ( __snake_case, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ =ShapEPipeline
lowerCamelCase__ =['prompt']
lowerCamelCase__ =['prompt']
lowerCamelCase__ =[
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowerCamelCase__ =False
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return 8
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(a_ )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
__snake_case : Optional[int] = PriorTransformer(**a_ )
return model
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
__snake_case : List[Any] = ShapERenderer(**a_ )
return model
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.dummy_prior
__snake_case : str = self.dummy_text_encoder
__snake_case : str = self.dummy_tokenizer
__snake_case : Tuple = self.dummy_renderer
__snake_case : int = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=10_24 , prediction_type='''sample''' , use_karras_sigmas=a_ , clip_sample=a_ , clip_sample_range=1.0 , )
__snake_case : Union[str, Any] = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def SCREAMING_SNAKE_CASE (self , a_ , a_=0 ):
'''simple docstring'''
if str(a_ ).startswith('''mps''' ):
__snake_case : Tuple = torch.manual_seed(a_ )
else:
__snake_case : Union[str, Any] = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case : Optional[int] = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = '''cpu'''
__snake_case : str = self.get_dummy_components()
__snake_case : List[Any] = self.pipeline_class(**a_ )
__snake_case : str = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = pipe(**self.get_dummy_inputs(a_ ) )
__snake_case : List[str] = output.images[0]
__snake_case : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__snake_case : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = torch_device == '''cpu'''
__snake_case : Tuple = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=a_ , relax_max_difference=a_ , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = self.get_dummy_components()
__snake_case : int = self.pipeline_class(**a_ )
__snake_case : int = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = 1
__snake_case : List[Any] = 2
__snake_case : int = self.get_dummy_inputs(a_ )
for key in inputs.keys():
if key in self.batch_params:
__snake_case : Dict = batch_size * [inputs[key]]
__snake_case : Union[str, Any] = pipe(**a_ , num_images_per_prompt=a_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
__snake_case : Optional[Any] = ShapEPipeline.from_pretrained('''openai/shap-e''' )
__snake_case : Optional[int] = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
__snake_case : Optional[Any] = torch.Generator(device=a_ ).manual_seed(0 )
__snake_case : str = pipe(
'''a shark''' , generator=a_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(a_ , a_ )
| 102 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
SCREAMING_SNAKE_CASE : Union[str, Any] = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""")
SCREAMING_SNAKE_CASE : Any = subprocess.check_output(F'git diff --name-only {fork_point_sha}'.split()).decode("""utf-8""").split()
SCREAMING_SNAKE_CASE : Union[str, Any] = """|""".join(sys.argv[1:])
SCREAMING_SNAKE_CASE : int = re.compile(rF'^({joined_dirs}).*?\.py$')
SCREAMING_SNAKE_CASE : str = [x for x in modified_files if regex.match(x)]
print(""" """.join(relevant_modified_files), end="""""")
| 102 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : int =logging.get_logger(__name__)
A_ : Optional[int] ={
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = "altclip_text_model"
def __init__( self , a__=25_00_02 , a__=10_24 , a__=24 , a__=16 , a__=40_96 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_14 , a__=1 , a__=0.02 , a__=0.02 , a__=1e-05 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , a__=7_68 , **a__ , ):
super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ )
_lowerCamelCase = vocab_size
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = hidden_act
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = max_position_embeddings
_lowerCamelCase = type_vocab_size
_lowerCamelCase = initializer_range
_lowerCamelCase = initializer_factor
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = position_embedding_type
_lowerCamelCase = use_cache
_lowerCamelCase = project_dim
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : int = "altclip_vision_model"
def __init__( self , a__=7_68 , a__=30_72 , a__=5_12 , a__=12 , a__=12 , a__=3 , a__=2_24 , a__=32 , a__="quick_gelu" , a__=1e-5 , a__=0.0 , a__=0.02 , a__=1.0 , **a__ , ):
super().__init__(**a__ )
_lowerCamelCase = hidden_size
_lowerCamelCase = intermediate_size
_lowerCamelCase = projection_dim
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = num_channels
_lowerCamelCase = patch_size
_lowerCamelCase = image_size
_lowerCamelCase = initializer_range
_lowerCamelCase = initializer_factor
_lowerCamelCase = attention_dropout
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = hidden_act
@classmethod
def snake_case_ ( cls , a__ , **a__ ):
cls._set_token_in_kwargs(a__ )
_lowerCamelCase , _lowerCamelCase = cls.get_config_dict(a__ , **a__ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get('model_type' ) == "altclip":
_lowerCamelCase = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(a__ , **a__ )
class __a ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = "altclip"
SCREAMING_SNAKE_CASE__ : Dict = True
def __init__( self , a__=None , a__=None , a__=7_68 , a__=2.6592 , **a__ ):
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
_lowerCamelCase = kwargs.pop('text_config_dict' , a__ )
_lowerCamelCase = kwargs.pop('vision_config_dict' , a__ )
super().__init__(**a__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
_lowerCamelCase = {}
# This is the complete result when using `text_config_dict`.
_lowerCamelCase = AltCLIPTextConfig(**a__ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
_lowerCamelCase = (
F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
F'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
_lowerCamelCase = (
F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
F'value `text_config["{key}"]` will be overriden.'
)
logger.warning(a__ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
_lowerCamelCase = {}
# This is the complete result when using `vision_config_dict`.
_lowerCamelCase = AltCLIPVisionConfig(**a__ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_lowerCamelCase = {
str(a__ ): value for key, value in _vision_config_dict['id2label'].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
_lowerCamelCase = (
F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
F'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
_lowerCamelCase = (
F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
F'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(a__ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
_lowerCamelCase = {}
logger.info('`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.' )
if vision_config is None:
_lowerCamelCase = {}
logger.info('`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.' )
_lowerCamelCase = AltCLIPTextConfig(**a__ )
_lowerCamelCase = AltCLIPVisionConfig(**a__ )
_lowerCamelCase = projection_dim
_lowerCamelCase = logit_scale_init_value
_lowerCamelCase = 1.0
@classmethod
def snake_case_ ( cls , a__ , a__ , **a__ ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ )
def snake_case_ ( self ):
_lowerCamelCase = copy.deepcopy(self.__dict__ )
_lowerCamelCase = self.text_config.to_dict()
_lowerCamelCase = self.vision_config.to_dict()
_lowerCamelCase = self.__class__.model_type
return output
| 80 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
A_ : str =logging.get_logger(__name__)
A_ : int =OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
A_ : Optional[int] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def SCREAMING_SNAKE_CASE_ ( snake_case : str )-> Any:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_lowerCamelCase = model_type_to_module_name(snake_case )
_lowerCamelCase = importlib.import_module(f'.{module_name}' , 'transformers.models' )
try:
return getattr(snake_case , snake_case )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(snake_case , '__name__' , snake_case ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_lowerCamelCase = importlib.import_module('transformers' )
if hasattr(snake_case , snake_case ):
return getattr(snake_case , snake_case )
return None
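# Fetch and parse the image processor configuration JSON from a local directory or the Hub,
# returning an empty dict when the file cannot be located.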
def SCREAMING_SNAKE_CASE_ ( snake_case : Union[str, os.PathLike] , snake_case : Optional[Union[str, os.PathLike]] = None , snake_case : bool = False , snake_case : bool = False , snake_case : Optional[Dict[str, str]] = None , snake_case : Optional[Union[bool, str]] = None , snake_case : Optional[str] = None , snake_case : bool = False , **snake_case : List[str] , )-> Optional[int]:
_lowerCamelCase = get_file_from_repo(
snake_case , snake_case , cache_dir=snake_case , force_download=snake_case , resume_download=snake_case , proxies=snake_case , use_auth_token=snake_case , revision=snake_case , local_files_only=snake_case , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(snake_case , encoding='utf-8' ) as reader:
return json.load(snake_case )
class __a :
def __init__( self ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(a__ )
def snake_case_ ( cls , a__ , **a__ ):
_lowerCamelCase = kwargs.pop('config' , a__ )
_lowerCamelCase = kwargs.pop('trust_remote_code' , a__ )
_lowerCamelCase = True
_lowerCamelCase , _lowerCamelCase = ImageProcessingMixin.get_image_processor_dict(a__ , **a__ )
_lowerCamelCase = config_dict.get('image_processor_type' , a__ )
_lowerCamelCase = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
_lowerCamelCase = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_lowerCamelCase = config_dict.pop('feature_extractor_type' , a__ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_lowerCamelCase = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
_lowerCamelCase = config_dict['auto_map']['AutoFeatureExtractor']
_lowerCamelCase = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(a__ , a__ ):
_lowerCamelCase = AutoConfig.from_pretrained(a__ , **a__ )
# It could be in `config.image_processor_type``
_lowerCamelCase = getattr(a__ , 'image_processor_type' , a__ )
if hasattr(a__ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_lowerCamelCase = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_lowerCamelCase = image_processor_class_from_name(a__ )
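# Remote code is available when an `AutoImageProcessor` entry exists in `auto_map`; local code
# when the class resolved or the config type is in the static mapping. `resolve_trust_remote_code`
# arbitrates between the two.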
_lowerCamelCase = image_processor_auto_map is not None
_lowerCamelCase = image_processor_class is not None or type(a__ ) in IMAGE_PROCESSOR_MAPPING
_lowerCamelCase = resolve_trust_remote_code(
a__ , a__ , a__ , a__ )
if has_remote_code and trust_remote_code:
_lowerCamelCase = get_class_from_dynamic_module(
a__ , a__ , **a__ )
_lowerCamelCase = kwargs.pop('code_revision' , a__ )
if os.path.isdir(a__ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(a__ , **a__ )
elif image_processor_class is not None:
return image_processor_class.from_dict(a__ , **a__ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(a__ ) in IMAGE_PROCESSOR_MAPPING:
_lowerCamelCase = IMAGE_PROCESSOR_MAPPING[type(a__ )]
return image_processor_class.from_dict(a__ , **a__ )
raise ValueError(
F'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
F'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
F'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def snake_case_ ( a__ , a__ ):
IMAGE_PROCESSOR_MAPPING.register(a__ , a__ )
| 80 | 1 |
"""simple docstring"""
from math import factorial
def __magic_name__ ( lowercase = 100 ):
return sum(int(x ) for x in str(factorial(lowercase ) ) )
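# Project Euler problem 20: with the default n = 100, the digit sum of 100! is 648.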
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
| 173 |
"""simple docstring"""
import datasets
from .evaluate import evaluate
_UpperCAmelCase = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100,000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_UpperCAmelCase = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_UpperCAmelCase = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly matches the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def lowerCamelCase__ ( self : int ) -> List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , )
def lowerCamelCase__ ( self : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Optional[int] ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Dict ={prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
SCREAMING_SNAKE_CASE_: Tuple =[
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
SCREAMING_SNAKE_CASE_: str =evaluate(dataset=lowerCAmelCase , predictions=lowerCAmelCase )
return score
| 173 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE )
class lowerCamelCase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowerCamelCase = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCamelCase = Features({'''text''': Value('''string''' )} )
_lowerCamelCase = Features({} )
_lowerCamelCase = "text"
@property
def UpperCamelCase__ ( self ) -> Dict[str, str]:
return {self.text_column: "text"}
| 77 |
"""simple docstring"""
def _A ( _a : int ):
"""simple docstring"""
A = abs(_a )
A = 0
while n > 0:
res += n % 1_0
n //= 1_0
return res
def _A ( _a : int ):
"""simple docstring"""
A = abs(_a )
return n if n < 1_0 else n % 1_0 + sum_of_digits(n // 1_0 )
def _A ( _a : int ):
"""simple docstring"""
return sum(int(_a ) for c in str(abs(_a ) ) )
def _A ( ):
"""simple docstring"""
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(_a : Callable , _a : int ) -> None:
A = f'{func.__name__}({value})'
A = timeit(f'__main__.{call}' , setup="""import __main__""" )
print(f'{call:56} = {func(_a )} -- {timing:.4f} seconds' )
for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(_a , _a )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 77 | 1 |
import os
from math import logaa
def a__ ( UpperCAmelCase : str = "base_exp.txt" ) -> int:
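# Project Euler problem 99: a**b > c**d exactly when b * log10(a) > d * log10(c),
# so the loop compares x * log10(base) per line instead of computing the huge powers.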
UpperCAmelCase : float = 0
UpperCAmelCase : Any = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(UpperCAmelCase ) , UpperCAmelCase ) ) ):
UpperCAmelCase , UpperCAmelCase : Optional[Any] = list(map(UpperCAmelCase , line.split(''',''' ) ) )
if x * logaa(UpperCAmelCase ) > largest:
UpperCAmelCase : str = x * logaa(UpperCAmelCase )
UpperCAmelCase : List[Any] = i + 1
return result
if __name__ == "__main__":
print(solution())
| 336 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.floataa ), timesteps=__A, )
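# Stochastic "churn" from Karras et al. (2022), Algorithm 2: raise the noise level to
# sigma_hat = sigma + gamma * sigma and add matching Gaussian noise scaled by
# sqrt(sigma_hat**2 - sigma**2).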
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
| 336 | 1 |
"""simple docstring"""
def __UpperCAmelCase ( __lowerCamelCase ) -> int:
lowercase__ : Union[str, Any] = [0] * len(__lowerCamelCase )
lowercase__ : List[str] = []
lowercase__ : Any = [1] * len(__lowerCamelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__lowerCamelCase ) ):
if indegree[i] == 0:
queue.append(__lowerCamelCase )
while queue:
lowercase__ : Tuple = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowercase__ : Optional[Any] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__lowerCamelCase )
print(max(__lowerCamelCase ) )
# Adjacency list of Graph
lowerCAmelCase_ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
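# --- Illustrative trace (not part of the original file): for the graph above,
# a longest path by vertex count is 0 -> 2 -> 5 -> 6 -> 7, so the call prints
# 5 (distances count vertices along the path, starting at 1).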
| 302 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ = 16
lowerCAmelCase_ = 32
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase = 16 ) -> Optional[int]:
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
lowercase__ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
lowercase__ : List[str] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__lowerCamelCase , max_length=__lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
lowercase__ : Dict = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowercase__ : int = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
lowercase__ : List[str] = 1_28 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
lowercase__ : List[str] = 16
elif accelerator.mixed_precision != "no":
lowercase__ : List[Any] = 8
else:
lowercase__ : Optional[int] = None
return tokenizer.pad(
__lowerCamelCase , padding='''longest''' , max_length=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_tensors='''pt''' , )
# Instantiate dataloaders.
lowercase__ : Dict = DataLoader(
tokenized_datasets['''train'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
lowercase__ : Union[str, Any] = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase_ = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase ) -> Tuple:
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __lowerCamelCase ) == "1":
lowercase__ : Any = 2
# Initialize accelerator
lowercase__ : str = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowercase__ : List[Any] = config['''lr''']
lowercase__ : Union[str, Any] = int(config['''num_epochs'''] )
lowercase__ : List[str] = int(config['''seed'''] )
lowercase__ : Any = int(config['''batch_size'''] )
lowercase__ : int = evaluate.load('''glue''' , '''mrpc''' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
@find_executable_batch_size(starting_batch_size=__lowerCamelCase )
def inner_training_loop(__lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(__lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowercase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowercase__ : str = model.to(accelerator.device )
# Instantiate optimizer
lowercase__ : Optional[int] = AdamW(params=model.parameters() , lr=__lowerCamelCase )
lowercase__ , lowercase__ : List[str] = get_dataloaders(__lowerCamelCase , __lowerCamelCase )
# Instantiate scheduler
lowercase__ : Optional[Any] = get_linear_schedule_with_warmup(
optimizer=__lowerCamelCase , num_warmup_steps=1_00 , num_training_steps=(len(__lowerCamelCase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = accelerator.prepare(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# Now we train the model
for epoch in range(__lowerCamelCase ):
model.train()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
lowercase__ : int = model(**__lowerCamelCase )
lowercase__ : Optional[int] = outputs.loss
accelerator.backward(__lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowercase__ : Tuple = model(**__lowerCamelCase )
lowercase__ : Dict = outputs.logits.argmax(dim=-1 )
lowercase__ , lowercase__ : Any = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__lowerCamelCase , references=__lowerCamelCase , )
lowercase__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , __lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def __UpperCAmelCase ( ) -> Tuple:
lowercase__ : List[str] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__lowerCamelCase , default=__lowerCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
lowercase__ : Union[str, Any] = parser.parse_args()
lowercase__ : Union[str, Any] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
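# --- Illustrative sketch (not part of the original script): what the
# `find_executable_batch_size` decorator does. It calls the wrapped function,
# and each time the call raises a CUDA out-of-memory error it halves
# `batch_size` and retries; the `raise` below is a stand-in for a real OOM.
@find_executable_batch_size(starting_batch_size=128)
def _demo_inner_loop(batch_size):
    if batch_size > 32:
        raise RuntimeError("CUDA out of memory.")  # simulated OOM
    return batch_size


# _demo_inner_loop() is called with *no* arguments; the decorator supplies
# batch_size, so this would fail at 128 and 64 and then return 32.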
| 302 | 1 |
"""PoolFormer model configuration"""

from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}


class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
| 104 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import subprocess

from packaging.version import Version, parse

from accelerate.commands.config.config_args import default_config_file, load_config_from_file


_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser


def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
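# --- Illustrative invocation (not part of the original file; flag names come
# from the parser above, but the TPU name and zone are made up):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -r requirements.txt" --install_accelerate --debug
#
# With --debug, the assembled `gcloud compute tpus tpu-vm ssh ...` command is
# printed instead of executed.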
| 104 | 1 |
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetrImageProcessor


class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        # Computes the expected height and width when providing images to
        # DetrImageProcessor, assuming do_resize is set with a "shortest_edge" size.
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 159 |
from __future__ import annotations

from typing import Generic, TypeVar

T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    # Disjoint-set node storing its data, parent reference, and rank
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    # Disjoint-set (union-find) data structure
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new singleton set containing data
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the representative of data's set, with path compression
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper for union: attach the lower-rank tree under the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from a node to its neighbours with edge weights
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node only if it is not already present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> GraphUndirectedWeighted[T]:
        # Kruskal's algorithm: build a minimum spanning tree of the graph
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
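# --- Illustrative usage (not part of the original file): build a small
# weighted triangle and extract its minimum spanning tree; Kruskal's
# algorithm keeps the two cheap edges and drops the expensive one.
def _example() -> None:
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 10)
    mst = g.kruskal()
    assert 3 not in mst.connections[1]  # the weight-10 edge was dropped
    assert mst.connections[2] == {1: 1, 3: 2}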
| 159 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_plbart"] = [
        "PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PLBartForCausalLM",
        "PLBartForConditionalGeneration",
        "PLBartForSequenceClassification",
        "PLBartModel",
        "PLBartPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 92 |
import shutil
import tempfile
import unittest

from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250004
RO_CODE = 250020


@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartTokenizer
    rust_tokenizer_class = MBartTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3                 unk: 2 + 1 = 3 ^
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
| 262 | 0 |
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a word's sorted-letter signature; anagrams share a signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the list with the same signature as my_word."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
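# --- Illustrative check (not part of the original file): the signature trick
# on a tiny in-memory word list, so it runs without words.txt.
def _example() -> None:
    by_sig = collections.defaultdict(list)
    for w in ["listen", "silent", "enlist", "google"]:
        by_sig[signature(w)].append(w)
    assert by_sig[signature("listen")] == ["listen", "silent", "enlist"]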
| 148 |
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # 50 steps for the deterministic DDIM scheduler, 1000 for DDPM
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Reverses the denoising loop; only works with the deterministic DDIM scheduler
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        # Spherical linear interpolation between two tensors
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
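# --- Illustrative check (not part of the original file): slerp interpolates
# along the arc between two tensors, so for orthogonal unit vectors the
# midpoint has equal components (sin(pi/4) ≈ 0.7071):
#
#   x0, x1 = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
#   AudioDiffusionPipeline.slerp(x0, x1, 0.5)  # tensor([0.7071, 0.7071])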
| 148 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"

# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration

mname = "facebook/wmt19-en-de"

tokenizer = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
config = FSMTConfig.from_pretrained(mname)

config.update(
    dict(
        d_model=4,
        encoder_layers=1,
        decoder_layers=1,
        encoder_ffn_dim=4,
        decoder_ffn_dim=4,
        encoder_attention_heads=1,
        decoder_attention_heads=1,
    )
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)

print("test output:", len(outputs.logits[0]))

# Save
mname_tiny = "tiny-wmt19-en-de"
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-de
| 169 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random array and target for benchmarking."""
    arr = [randint(-1000, 1000) for _ in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)


def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Two-pointer scan over the sorted array."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
while left < right:
if arr[i] + arr[left] + arr[right] == target:
return (arr[i], arr[left], arr[right])
elif arr[i] + arr[left] + arr[right] < target:
left += 1
elif arr[i] + arr[left] + arr[right] > target:
right -= 1
return (0, 0, 0)
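# Illustrative check (added for exposition, not in the original script): both
# implementations return the same sorted triplet for a known input, e.g.
#     triplet_sum1([13, 29, 7, 23, 5], 35)  ->  (5, 7, 23)
#     triplet_sum2([13, 29, 7, 23, 5], 35)  ->  (5, 7, 23)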
def solution_times() -> tuple[float, float]:
    """Benchmark both implementations and return their best times."""
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
print(F'''The time for optimized implementation is {times[1]}.''')
| 169 | 1 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, for the time being, `from diffusers.pipelines import DiffusionPipeline` keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
) | 64 | """simple docstring"""
def solution(n: int = 4000000) -> int:
    """Sum the even-valued Fibonacci terms that do not exceed ``n``."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
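# Worked check (added for exposition): for n = 10 the even Fibonacci terms are
# 2 and 8, so solution(10) == 10.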
if __name__ == "__main__":
print(F"""{solution() = }""") | 64 | 1 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters(model_a, model_b, did_step, iteration):
for param, grad_param in zip(model_a.parameters() ,model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad ,grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)
def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
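# Illustrative sketch (added for exposition, not part of the original test file):
# the pattern these tests exercise is skipping gradient synchronization on
# accumulation steps via `no_sync`, then syncing on the final micro-batch.
def _example_no_sync_loop(accelerator, model, optimizer, batches):
    for step, (x, y) in enumerate(batches):
        if step % 2 == 0:  # accumulation step: gradients stay local to each process
            with accelerator.no_sync(model):
                accelerator.backward(F.mse_loss(model(x), y))
        else:  # sync step: gradients are all-reduced across processes
            accelerator.backward(F.mse_loss(model(x), y))
            optimizer.step()
            optimizer.zero_grad()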
def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()
def test_dataloader_break():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main():
    accelerator = Accelerator()
    state = accelerator.state
if state.local_process_index == 0:
print('''**Test `accumulate` gradient accumulation with dataloader break**''' )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print('''**Test NOOP `no_sync` context manager**''' )
        test_noop_sync(accelerator)
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print('''**Test Distributed `no_sync` context manager**''' )
        test_distributed_sync(accelerator)
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation, ''' ,F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,)
                test_gradient_accumulation(split_batch, dispatch_batches)
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version('''<''' ,'''2.0''' ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' ,'''`split_batches=False`, `dispatch_batches=False`**''' ,)
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
'''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' ,F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" ,)
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 330 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 330 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
| 364 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)
        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)
        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed.")
        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 195 | 0 |
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
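# Illustrative usage (added for exposition; assumes an ANSI-capable terminal):
#     forceWrite("choose an option", end="\n")
#     move_cursor(2, "UP")   # emits ESC[2A to move the cursor up two lines
#     clear_line()           # blank the current line, then return to column 0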
| 161 |
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Copy/paste/tweak model's weights to transformers design."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 154 | 0 |
def max_product_subarray(numbers: list[int]) -> int:
    """Return the maximum product over all contiguous subarrays of ``numbers``."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
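# Worked example (added for exposition): for [2, 3, -2, 4] the answer is
# 2 * 3 = 6; swapping the running max/min on a negative number keeps the most
# negative product around, so a later negative factor can turn it back into a
# new maximum.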
| 371 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences."""

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
def __getitem__( self , __lowercase ) -> Any:
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ) -> Dict:
"""simple docstring"""
return len(self.lengths )
    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        idxs = self.lengths > max_len
        logger.info(f"Splitting {sum(idxs)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)
                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])
        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)
    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")
    def print_statistics(self):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 266 | 0 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)")
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
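# Illustrative usage (added for exposition; the model name is an arbitrary example):
#     from transformers import pipeline
#     extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#     features = extractor("This is a test", return_tensors=False)
#     # -> nested list of floats with shape [1, seq_len, hidden_size]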
| 220 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first ``n`` naturals."""
    sum_of_squares = 0
    sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
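# Worked check (added for exposition): for n = 10 the sum is 55 and the sum of
# squares is 385, so solution(10) == 55**2 - 385 == 2640.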
if __name__ == "__main__":
print(F'''{solution() = }''')
| 220 | 1 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def check_pt_tf_models(self, tf_model, pt_model, inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        inputs_dict["noise"] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, inputs_dict)
    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.")
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
| 158 |
import logging
import os
import threading
import time
try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...")
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
else:
return path
class WindowsFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
_UpperCAmelCase : Tuple = None
if msvcrt:
_UpperCAmelCase : str = WindowsFileLock
elif fcntl:
_UpperCAmelCase : List[Any] = UnixFileLock
else:
_UpperCAmelCase : Optional[int] = SoftFileLock
if warnings is not None:
warnings.warn("only soft file lock is available")
| 158 | 1 |
"""simple docstring"""
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =input("Enter numbers separated by a comma:\n").strip()
__SCREAMING_SNAKE_CASE =[int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
| 213 | """simple docstring"""
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(circle_sort(unsorted))
| 213 | 1 |
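A quick non-interactive check of the sorter above, assuming circle_sort from this row is in scope:

assert circle_sort([6, 1, 5, 2, 4, 3]) == [1, 2, 3, 4, 5, 6]
assert circle_sort([]) == []
assert circle_sort([3, 3, 1]) == [1, 3, 3]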
"""simple docstring"""
def binary_multiply(a: int, b: int) -> int:
    # multiply via repeated doubling: add `a` whenever the low bit of `b` is set
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res

def binary_mod_multiply(a: int, b: int, c: int) -> int:
    # same scheme, but keep the accumulator reduced modulo `c`
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
| 80 |
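A self-contained worked trace of the doubling scheme from this row:

# 13 * 11: b = 11 = 0b1011, so `a` is added at bit weights 1, 2 and 8:
# 13 + 26 + 104 = 143
a, b, res = 13, 11, 0
while b > 0:
    if b & 1:
        res += a
    a += a
    b >>= 1
assert res == 143 == 13 * 11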
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
help=(
"""The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"""
""" Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 80 | 1 |
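The converter can also be driven programmatically; a sketch with hypothetical paths (the keyword arguments mirror the CLI flags defined above):

from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
    download_from_original_stable_diffusion_ckpt,
)

pipe = download_from_original_stable_diffusion_ckpt(
    checkpoint_path="./v1-5-pruned.ckpt",        # hypothetical checkpoint path
    original_config_file="./v1-inference.yaml",  # hypothetical YAML config
    scheduler_type="ddim",
)
pipe.save_pretrained("./stable-diffusion-v1-5", safe_serialization=True)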
from collections import Counter
from timeit import timeit

def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    # a string can be rearranged into a palindrome iff at most one
    # character occurs an odd number of times
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2

def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True

def benchmark(check_str: str = "") -> None:
    print("\nFor string = ", check_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(check_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )

if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 176 |
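Two sanity cases for the check above, assuming the Counter-based variant from this row is in scope:

assert can_string_be_rearranged_as_palindrome_counter("never odd or even")  # all counts even
assert not can_string_be_rearranged_as_palindrome_counter("hello")  # h, e, o all odd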
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class lowercase__ ( _UpperCAmelCase ):
def __init__( self : str , UpperCAmelCase_ : int=None , UpperCAmelCase_ : List[str]=None , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[Any] ):
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
if config is None:
assert isinstance(self.model , UpperCAmelCase_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F' {self.model.__class__}'
)
SCREAMING_SNAKE_CASE__ = self.model.config
else:
SCREAMING_SNAKE_CASE__ = config
SCREAMING_SNAKE_CASE__ = data_args
SCREAMING_SNAKE_CASE__ = self.config.tgt_vocab_size if isinstance(self.config , UpperCAmelCase_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
' padding..' )
if self.args.label_smoothing == 0:
SCREAMING_SNAKE_CASE__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
SCREAMING_SNAKE_CASE__ = label_smoothed_nll_loss
def A_ ( self : Tuple , UpperCAmelCase_ : int ):
if self.optimizer is None:
SCREAMING_SNAKE_CASE__ = ['bias', 'LayerNorm.weight']
SCREAMING_SNAKE_CASE__ = [
{
'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'weight_decay': self.args.weight_decay,
},
{
'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
SCREAMING_SNAKE_CASE__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
SCREAMING_SNAKE_CASE__ = Adafactor
SCREAMING_SNAKE_CASE__ = {'scale_parameter': False, 'relative_step': False}
else:
SCREAMING_SNAKE_CASE__ = AdamW
SCREAMING_SNAKE_CASE__ = {
                'betas': (self.args.adam_beta1, self.args.adam_beta2),
'eps': self.args.adam_epsilon,
}
SCREAMING_SNAKE_CASE__ = self.args.learning_rate
if self.sharded_ddp:
SCREAMING_SNAKE_CASE__ = OSS(
params=UpperCAmelCase_ , optim=UpperCAmelCase_ , **UpperCAmelCase_ , )
else:
SCREAMING_SNAKE_CASE__ = optimizer_cls(UpperCAmelCase_ , **UpperCAmelCase_ )
if self.lr_scheduler is None:
SCREAMING_SNAKE_CASE__ = self._get_lr_scheduler(UpperCAmelCase_ )
else: # ignoring --lr_scheduler
logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' )
def A_ ( self : str , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
SCREAMING_SNAKE_CASE__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
SCREAMING_SNAKE_CASE__ = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
SCREAMING_SNAKE_CASE__ = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=UpperCAmelCase_ )
return scheduler
def A_ ( self : List[str] ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def A_ ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase_ , use_cache=UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE__ = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase_ , labels=UpperCAmelCase_ , use_cache=UpperCAmelCase_ )[:2]
else:
# compute label smoothed loss
SCREAMING_SNAKE_CASE__ = model(**UpperCAmelCase_ , use_cache=UpperCAmelCase_ )[0]
SCREAMING_SNAKE_CASE__ = torch.nn.functional.log_softmax(UpperCAmelCase_ , dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.loss_fn(UpperCAmelCase_ , UpperCAmelCase_ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] ):
SCREAMING_SNAKE_CASE__ = inputs.pop('labels' )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._compute_loss(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
return loss
def A_ ( self : List[str] , UpperCAmelCase_ : nn.Module , UpperCAmelCase_ : Dict[str, Union[torch.Tensor, Any]] , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[List[str]] = None , ):
SCREAMING_SNAKE_CASE__ = self._prepare_inputs(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = {
'max_length': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
SCREAMING_SNAKE_CASE__ = self.model.generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **UpperCAmelCase_ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE__ = self._pad_tensors_to_max_len(UpperCAmelCase_ , gen_kwargs['max_length'] )
SCREAMING_SNAKE_CASE__ = inputs.pop('labels' )
with torch.no_grad():
# compute loss on predict data
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self._compute_loss(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
SCREAMING_SNAKE_CASE__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
SCREAMING_SNAKE_CASE__ = self._pad_tensors_to_max_len(UpperCAmelCase_ , gen_kwargs['max_length'] )
return (loss, logits, labels)
def A_ ( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] ):
# If PAD token is not defined at least EOS token has to be defined
SCREAMING_SNAKE_CASE__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'
F' padded to `max_length`={max_length}' )
SCREAMING_SNAKE_CASE__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
SCREAMING_SNAKE_CASE__ = tensor
return padded_tensor
| 176 | 1 |
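The trainer above imports label_smoothed_nll_loss from a local utils module; a common fairseq-style formulation is sketched below (the local helper may differ in details):

import torch

def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None):
    # mix the usual NLL with a uniform distribution over the vocabulary
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is not None:
        pad_mask = target.eq(ignore_index)
        nll_loss.masked_fill_(pad_mask, 0.0)
        smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss

lprobs = torch.log_softmax(torch.randn(4, 7), dim=-1)
target = torch.tensor([0, 3, 6, 1])
loss, nll = label_smoothed_nll_loss(lprobs, target, epsilon=0.1)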
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = "▁"
lowercase__ = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
lowercase__ = {
"sentencepiece_model_file": "sentencepiece.bpe.model",
"vocab_file": "vocab.txt",
}
lowercase__ = {
"vocab_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
},
"sentencepiece_model_file": {
"ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
"ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
},
}
lowercase__ = {
"ernie-m-base": 514,
"ernie-m-large": 514,
}
lowercase__ = {
"ernie-m-base": {"do_lower_case": False},
"ernie-m-large": {"do_lower_case": False},
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = ["input_ids"]
UpperCAmelCase_ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase_ : Any = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : Union[str, Any] = RESOURCE_FILES_NAMES
def __init__( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict=None , lowercase_ : Optional[Any]=False , lowercase_ : List[str]="utf8" , lowercase_ : List[Any]="[UNK]" , lowercase_ : List[str]="[SEP]" , lowercase_ : Any="[PAD]" , lowercase_ : List[Any]="[CLS]" , lowercase_ : Optional[int]="[MASK]" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : Tuple , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , pad_token=lowercase_ , cls_token=lowercase_ , mask_token=lowercase_ , vocab_file=lowercase_ , encoding=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase : Any = do_lower_case
UpperCAmelCase : int = sentencepiece_model_ckpt
UpperCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCAmelCase : str = self.load_vocab(filepath=lowercase_ )
else:
UpperCAmelCase : List[Any] = {self.sp_model.id_to_piece(lowercase_ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase : List[str] = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase_ ( self : List[str] , lowercase_ : str ) -> int:
if text is None:
return None
UpperCAmelCase : Dict = self.tokenize(lowercase_ )
UpperCAmelCase , UpperCAmelCase : List[str] = '', []
for i, ch in enumerate(lowercase_ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase : Any = self.SP_CHAR_MAPPING.get(lowercase_ )
else:
UpperCAmelCase : Dict = unicodedata.normalize('NFKC' , lowercase_ )
if self.is_whitespace(lowercase_ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(lowercase_ ) )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase : Union[str, Any] = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase : Optional[int] = token[1:]
UpperCAmelCase : Optional[Any] = text[offset:].index(lowercase_ ) + offset
UpperCAmelCase : List[Any] = start + len(lowercase_ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase : str = end
return token_mapping
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
return len(self.vocab )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : str ) -> str:
UpperCAmelCase : List[str] = self.__dict__.copy()
UpperCAmelCase : Optional[Any] = None
return state
def __setstate__( self : Optional[int] , lowercase_ : Optional[Any] ) -> Any:
UpperCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase : Union[str, Any] = {}
UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase_ ( self : str , lowercase_ : Optional[Any] ) -> str:
return "".join((self.SP_CHAR_MAPPING.get(lowercase_ , lowercase_ ) for c in text) )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int=False , lowercase_ : List[str]=64 , lowercase_ : Optional[int]=0.1 ) -> Optional[int]:
if self.sp_model_kwargs.get('enable_sampling' ) is True:
UpperCAmelCase : int = True
if self.sp_model_kwargs.get('alpha' ) is not None:
UpperCAmelCase : Any = self.sp_model_kwargs.get('alpha' )
if self.sp_model_kwargs.get('nbest_size' ) is not None:
UpperCAmelCase : Any = self.sp_model_kwargs.get('nbest_size' )
if not enable_sampling:
UpperCAmelCase : str = self.sp_model.EncodeAsPieces(lowercase_ )
else:
UpperCAmelCase : Optional[int] = self.sp_model.SampleEncodeAsPieces(lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Dict = []
for pi, piece in enumerate(lowercase_ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(lowercase_ ) and pi != 0:
new_pieces.append(lowercase_ )
continue
else:
continue
UpperCAmelCase : Optional[int] = 0
for i, chunk in enumerate(lowercase_ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(lowercase_ ) or self.is_punct(lowercase_ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(lowercase_ )
UpperCAmelCase : Optional[Any] = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase : List[Any] = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase : List[str] = i
if len(lowercase_ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Union[str, Any] ) -> Tuple:
UpperCAmelCase : int = ''.join(lowercase_ ).replace(lowercase_ , ' ' ).strip()
return out_string
def UpperCAmelCase_ ( self : str , lowercase_ : Optional[int] ) -> Optional[Any]:
UpperCAmelCase : Any = self.convert_ids_to_tokens(lowercase_ )
UpperCAmelCase : List[str] = ''.join(lowercase_ ).replace(lowercase_ , ' ' ).strip()
return out_string
def UpperCAmelCase_ ( self : int , lowercase_ : List[str] ) -> int:
return self.vocab.get(lowercase_ , self.vocab.get(self.unk_token ) )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : Union[str, Any] ) -> List[str]:
return self.reverse_vocab.get(lowercase_ , self.unk_token )
def UpperCAmelCase_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : str=None ) -> Union[str, Any]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Any = [self.cls_token_id]
UpperCAmelCase : Optional[int] = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase_ ( self : List[str] , lowercase_ : str , lowercase_ : Optional[int]=None ) -> Any:
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase_ ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any]=None , lowercase_ : List[str]=False ) -> str:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1]
def UpperCAmelCase_ ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(lowercase_ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(lowercase_ ) + 1) + [1] * (len(lowercase_ ) + 3)
def UpperCAmelCase_ ( self : List[str] , lowercase_ : List[str] ) -> Optional[Any]:
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase_ ( self : int , lowercase_ : Any ) -> Any:
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] ) -> Optional[Any]:
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase_ ( self : int , lowercase_ : int ) -> Dict:
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(lowercase_ ) == 1:
UpperCAmelCase : str = unicodedata.category(lowercase_ )
if cat == "Zs":
return True
return False
def UpperCAmelCase_ ( self : str , lowercase_ : Any ) -> Tuple:
UpperCAmelCase : List[Any] = {}
with io.open(lowercase_ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(lowercase_ ):
UpperCAmelCase : Any = line.rstrip('\n' )
UpperCAmelCase : int = int(lowercase_ )
return token_to_idx
def UpperCAmelCase_ ( self : str , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : Union[str, Any] = 0
if os.path.isdir(lowercase_ ):
UpperCAmelCase : List[Any] = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
UpperCAmelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory
with open(lowercase_ , 'w' , encoding='utf-8' ) as writer:
for token, token_index in sorted(self.vocab.items() , key=lambda lowercase_ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
' Please check that the vocabulary is not corrupted!' )
UpperCAmelCase : str = token_index
writer.write(token + '\n' )
index += 1
UpperCAmelCase : Any = os.path.join(lowercase_ , 'sentencepiece.bpe.model' )
with open(lowercase_ , 'wb' ) as fi:
UpperCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (vocab_file,)
| 280 |
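The pair layout built by the tokenizer above, spelled out with hypothetical ids:

cls_id, sep_id = 0, 2                  # hypothetical special-token ids
a_ids, b_ids = [11, 12, 13], [21, 22]  # hypothetical token ids for A and B
# [CLS] A [SEP] [SEP] B [SEP]
pair = [cls_id] + a_ids + [sep_id, sep_id] + b_ids + [sep_id]
token_type_ids = [0] * (len(a_ids) + 1) + [1] * (len(b_ids) + 3)
assert len(pair) == len(token_type_ids) == len(a_ids) + len(b_ids) + 4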
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = """perceiver"""
def __init__( self : str , lowercase_ : List[str]=256 , lowercase_ : List[str]=1_280 , lowercase_ : str=768 , lowercase_ : Tuple=1 , lowercase_ : str=26 , lowercase_ : List[Any]=8 , lowercase_ : int=8 , lowercase_ : List[str]=None , lowercase_ : Dict=None , lowercase_ : int="kv" , lowercase_ : Union[str, Any]=1 , lowercase_ : List[str]=1 , lowercase_ : Any="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : str=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Any=True , lowercase_ : Union[str, Any]=262 , lowercase_ : Union[str, Any]=2_048 , lowercase_ : Optional[int]=56 , lowercase_ : int=[368, 496] , lowercase_ : str=16 , lowercase_ : Optional[int]=1_920 , lowercase_ : Tuple=16 , lowercase_ : int=[1, 16, 224, 224] , **lowercase_ : Union[str, Any] , ) -> List[str]:
super().__init__(**lowercase_ )
UpperCAmelCase : Union[str, Any] = num_latents
UpperCAmelCase : List[Any] = d_latents
UpperCAmelCase : Dict = d_model
UpperCAmelCase : Dict = num_blocks
UpperCAmelCase : Optional[int] = num_self_attends_per_block
UpperCAmelCase : Optional[Any] = num_self_attention_heads
UpperCAmelCase : Optional[Any] = num_cross_attention_heads
UpperCAmelCase : Tuple = qk_channels
UpperCAmelCase : Optional[int] = v_channels
UpperCAmelCase : str = cross_attention_shape_for_attention
UpperCAmelCase : Union[str, Any] = self_attention_widening_factor
UpperCAmelCase : List[Any] = cross_attention_widening_factor
UpperCAmelCase : Optional[Any] = hidden_act
UpperCAmelCase : int = attention_probs_dropout_prob
UpperCAmelCase : Optional[int] = initializer_range
UpperCAmelCase : Dict = layer_norm_eps
UpperCAmelCase : Any = use_query_residual
# masked language modeling attributes
UpperCAmelCase : Any = vocab_size
UpperCAmelCase : List[Any] = max_position_embeddings
# image classification attributes
UpperCAmelCase : str = image_size
# flow attributes
UpperCAmelCase : Any = train_size
# multimodal autoencoding attributes
UpperCAmelCase : Any = num_frames
UpperCAmelCase : List[Any] = audio_samples_per_frame
UpperCAmelCase : Tuple = samples_per_patch
UpperCAmelCase : Union[str, Any] = output_shape
class A_ ( _snake_case ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCAmelCase : Tuple = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('inputs', dynamic_axis),
('attention_mask', dynamic_axis),
] )
@property
def UpperCAmelCase_ ( self : str ) -> float:
return 1E-4
def UpperCAmelCase_ ( self : str , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , lowercase_ : int = 3 , lowercase_ : int = 40 , lowercase_ : int = 40 , ) -> Mapping[str, Any]:
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(lowercase_ , lowercase_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase : Tuple = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCAmelCase : int = preprocessor.num_special_tokens_to_add(lowercase_ )
UpperCAmelCase : Union[str, Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
UpperCAmelCase : List[Any] = [' '.join(['a'] ) * seq_length] * batch_size
UpperCAmelCase : Union[str, Any] = dict(preprocessor(lowercase_ , return_tensors=lowercase_ ) )
UpperCAmelCase : Union[str, Any] = inputs.pop('input_ids' )
return inputs
elif isinstance(lowercase_ , lowercase_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
UpperCAmelCase : Tuple = compute_effective_axis_dimension(lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch )
UpperCAmelCase : Any = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCAmelCase : Tuple = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_ ) )
UpperCAmelCase : Dict = inputs.pop('pixel_values' )
return inputs
else:
raise ValueError(
'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
| 280 | 1 |
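A sketch of what compute_effective_axis_dimension (imported above) does, reconstructed from its call sites; treat the body as an assumption, not the canonical implementation:

def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int:
    # dynamic axes are passed as -1, so substitute a small fixed size
    # and reserve room for special tokens that will be added later
    if dimension <= 0:
        dimension = fixed_dimension
    return dimension - num_token_to_add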
"""simple docstring"""
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
    is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
lowerCamelCase_ = '''pytorch_model.bin'''
lowerCamelCase_ = '''pytorch_model.bin.index.json'''
lowerCamelCase_ = '''adapter_config.json'''
lowerCamelCase_ = '''adapter_model.bin'''
lowerCamelCase_ = '''adapter_model.safetensors'''
lowerCamelCase_ = '''tf_model.h5'''
lowerCamelCase_ = '''tf_model.h5.index.json'''
lowerCamelCase_ = '''model.ckpt'''
lowerCamelCase_ = '''flax_model.msgpack'''
lowerCamelCase_ = '''flax_model.msgpack.index.json'''
lowerCamelCase_ = '''model.safetensors'''
lowerCamelCase_ = '''model.safetensors.index.json'''
lowerCamelCase_ = '''config.json'''
lowerCamelCase_ = '''preprocessor_config.json'''
lowerCamelCase_ = FEATURE_EXTRACTOR_NAME
lowerCamelCase_ = '''generation_config.json'''
lowerCamelCase_ = '''modelcard.json'''
lowerCamelCase_ = '''▁'''
lowerCamelCase_ = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
lowerCamelCase_ = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
lowerCamelCase_ = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
lowerCamelCase_ = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def snake_case ( A__ ):
if version.parse(A__ ) < version.parse(A__ ):
if "dev" in min_version:
UpperCAmelCase_ : Tuple = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/docs/transformers/installation#install-from-source`),"
)
else:
UpperCAmelCase_ : int = F"""This example requires a minimum version of {min_version},"""
error_message += F""" but the version found is {__version__}.\n"""
raise ImportError(
error_message
+ "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
"versions of HuggingFace Transformers." )
| 268 |
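The guard above relies on PEP 440 ordering from packaging.version; two properties it depends on:

from packaging import version

# dev releases sort before their final release, which is why the error
# message special-cases a "dev" minimum version
assert version.parse("4.30.0.dev0") < version.parse("4.30.0")
# comparison is numeric per release segment, not lexicographic
assert version.parse("4.9.1") < version.parse("4.10.0")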
"""simple docstring"""
def method_a(boundary: list, steps: float) -> float:
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y

def make_points(a: float, b: float, h: float):
    # yields the interior sample points a+h, a+2h, ...
    x = a + h
    while x < (b - h):
        yield x
        x = x + h

def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y

def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary, steps)
    print(f"""y = {y}""")

if __name__ == "__main__":
    main()
| 268 | 1 |
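For comparison, a closed-form composite trapezoid over the same integrand; with ∫₀¹ x² dx = 1/3, ten panels give 0.335, and the error shrinks as O(h²):

def trapezoid(func, a: float, b: float, n: int) -> float:
    # textbook composite rule: h * (f0/2 + f1 + ... + f_{n-1} + fn/2)
    h = (b - a) / n
    return h * (0.5 * (func(a) + func(b)) + sum(func(a + i * h) for i in range(1, n)))

assert abs(trapezoid(lambda x: x * x, 0.0, 1.0, 10) - 0.335) < 1e-12
assert abs(trapezoid(lambda x: x * x, 0.0, 1.0, 1000) - 1 / 3) < 1e-6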
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : List[str] = DebertaTokenizer
_UpperCamelCase : int = True
_UpperCamelCase : Dict = DebertaTokenizerFast
def __a ( self : Tuple ) -> List[Any]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase : List[Any] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
lowercase : Dict = dict(zip(_A , range(len(_A ) ) ) )
lowercase : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowercase : Optional[Any] = {'''unk_token''': '''[UNK]'''}
lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_A ) )
def __a ( self : Optional[int] , **_A : Tuple ) -> Tuple:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def __a ( self : List[Any] , _A : List[Any] ) -> int:
"""simple docstring"""
lowercase : Tuple = '''lower newer'''
lowercase : List[Any] = '''lower newer'''
return input_text, output_text
def __a ( self : Tuple ) -> Any:
"""simple docstring"""
lowercase : Dict = self.get_tokenizer()
lowercase : Optional[Any] = '''lower newer'''
lowercase : List[str] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowercase : Union[str, Any] = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
lowercase : Any = tokens + [tokenizer.unk_token]
lowercase : int = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , _A )
def __a ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase : Tuple = self.get_tokenizer()
lowercase : List[str] = tokenizer('''Hello''' , '''World''' )
lowercase : Optional[int] = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , _A )
@slow
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowercase : int = tokenizer.encode('''sequence builders''' , add_special_tokens=_A )
lowercase : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A )
lowercase : Dict = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_A , add_prefix_space=_A )
lowercase : str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_A , add_prefix_space=_A )
lowercase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A )
lowercase : int = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def __a ( self : int ) -> List[str]:
"""simple docstring"""
lowercase : Union[str, Any] = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowercase : int = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
lowercase : Optional[Any] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
lowercase : Optional[int] = tokenizer(_A , padding=_A )
lowercase : Any = [tokenizer.decode(_A , skip_special_tokens=_A ) for seq in encoding['''input_ids''']]
# fmt: off
lowercase : Optional[int] = {
'''input_ids''': [
[1, 2_118, 11_126, 565, 35, 83, 25_191, 163, 18_854, 13, 12_156, 12, 16_101, 25_376, 13_807, 9, 22_205, 27_893, 1_635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_118, 11_126, 565, 24_536, 80, 43_797, 4_878, 7_373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3_724, 1_538, 33_183, 11_303, 43_797, 1_938, 4, 870, 24_165, 29_105, 5, 739, 32_644, 33_183, 11_303, 36_173, 88, 80, 650, 7_821, 45_940, 6, 52, 2_559, 5, 1_836, 9, 5, 7_397, 13_171, 31, 5, 1_836, 9, 32_644, 33_183, 11_303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowercase : List[str] = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , _A )
for expected, decoded in zip(_A , _A ):
            self.assertEqual(_A , _A )
| 116 |
from string import ascii_uppercase

# letter -> index and index -> letter lookup tables
dict_char_to_num = {char: i for i, char in enumerate(ascii_uppercase)}
dict_num_to_char = dict(enumerate(ascii_uppercase))

def generate_key(message: str, key: str) -> str:
    # repeat the key until it matches the length of the message
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key

def cipher_text(message: str, key_new: str) -> str:
    # encrypt: shift each letter back by its key letter; spaces pass through
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            x = (dict_char_to_num[letter] - dict_char_to_num[key_new[i]]) % 26
            i += 1
            encrypted += dict_num_to_char[x]
    return encrypted

def original_text(cipher_txt: str, key_new: str) -> str:
    # decrypt: shift each letter forward by its key letter
    or_txt = ""
    i = 0
    for letter in cipher_txt:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict_char_to_num[letter] + dict_char_to_num[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict_num_to_char[x]
    return or_txt

def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"""Encrypted Text = {s}""")
    print(f"""Original Text = {original_text(s, key_new)}""")

if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 116 | 1 |
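A round-trip check, assuming the three helpers from this row are in scope:

key_new = generate_key("THE GERMAN ATTACK", "SECRET")
encrypted = cipher_text("THE GERMAN ATTACK", key_new)
assert original_text(encrypted, key_new) == "THE GERMAN ATTACK"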
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
__a :List[Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__a :Optional[Any] = ' def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n'
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Dict ):
A_ = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , "models/bert/" ) )
A_ = self.transformer_dir
shutil.copy(
os.path.join(UpperCAmelCase , "src/transformers/models/bert/modeling_bert.py" ) , os.path.join(self.transformer_dir , "models/bert/modeling_bert.py" ) , )
def __A ( self : int ):
A_ = "src/transformers"
shutil.rmtree(self.transformer_dir )
def __A ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Tuple , UpperCAmelCase : str=None ):
A_ = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
A_ = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
A_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
A_ = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
A_ = os.path.join(self.transformer_dir , "new_code.py" )
with open(UpperCAmelCase , "w" , newline="\n" ) as f:
f.write(UpperCAmelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(UpperCAmelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=UpperCAmelCase )
with open(UpperCAmelCase , "r" ) as f:
self.assertTrue(f.read() , UpperCAmelCase )
def __A ( self : Union[str, Any] ):
A_ = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead" )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : int ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead" , "BertLMPredictionHead" , UpperCAmelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , re.sub("Bert" , "TestModel" , UpperCAmelCase ) , )
# Copy consistency with a really long name
A_ = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , f'''{long_class_name}LMPredictionHead''' , re.sub("Bert" , UpperCAmelCase , UpperCAmelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel" , "TestModelLMPredictionHead" , UpperCAmelCase , overwrite_result=re.sub("Bert" , "TestModel" , UpperCAmelCase ) , )
def __A ( self : int ):
A_ = check_copies.LOCALIZED_READMES["README_zh-hans.md"]
A_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
" Luong, Quoc V. Le, Christopher D. Manning."
)
A_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
A_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
" method has been applied to compress GPT2 into"
" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
" Multilingual BERT into"
" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
" Christopher D. Manning 发布。\n"
)
A_ , A_ = check_copies.convert_to_localized_md(
UpperCAmelCase , UpperCAmelCase , localized_readme["format_model_list"] )
self.assertFalse(UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
A_ , A_ = check_copies.convert_to_localized_md(
UpperCAmelCase , UpperCAmelCase , localized_readme["format_model_list"] )
# Check whether the number of models is equal to README.md after conversion.
self.assertTrue(UpperCAmelCase )
A_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
)
A_ = (
"1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
A_ = (
"1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
)
A_ , A_ = check_copies.convert_to_localized_md(
UpperCAmelCase , UpperCAmelCase , localized_readme["format_model_list"] )
# Check if the model link is synchronized.
self.assertEqual(UpperCAmelCase , UpperCAmelCase ) | 312 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike] | 312 | 1 |
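# A small usage sketch for the aliases above (the helper function is
# hypothetical, added for illustration): subscripting ListLike with a concrete
# type specialises both the List and the Tuple member at once.
def first_item(values: ListLike[int]) -> int:
    # Accepts [1, 2, 3] and (1, 2, 3) alike.
    return values[0]

assert first_item([1, 2, 3]) == first_item((1, 2, 3)) == 1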
import requests
__A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase_ ( UpperCamelCase__ : str ) -> None:
"""simple docstring"""
__lowerCamelCase = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['articles'] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 354 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=False , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=32 * 8 , lowerCamelCase__=32 * 8 , lowerCamelCase__=4 , lowerCamelCase__=64 , ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = is_training
__lowerCamelCase = use_auxiliary_loss
__lowerCamelCase = num_queries
__lowerCamelCase = num_channels
__lowerCamelCase = min_size
__lowerCamelCase = max_size
__lowerCamelCase = num_labels
__lowerCamelCase = hidden_dim
__lowerCamelCase = hidden_dim
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
__lowerCamelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
__lowerCamelCase = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
__lowerCamelCase = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
__lowerCamelCase = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
__lowerCamelCase = self.num_queries
__lowerCamelCase = self.num_labels
__lowerCamelCase = [1, 1, 1, 1]
__lowerCamelCase = self.num_channels
__lowerCamelCase = 64
__lowerCamelCase = 128
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
__lowerCamelCase = self.hidden_dim
return config
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.prepare_config_and_inputs()
__lowerCamelCase = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
'''simple docstring'''
__lowerCamelCase = output.encoder_hidden_states
__lowerCamelCase = output.pixel_decoder_hidden_states
__lowerCamelCase = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=False ) -> Tuple:
'''simple docstring'''
with torch.no_grad():
__lowerCamelCase = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
__lowerCamelCase = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
__lowerCamelCase = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
__lowerCamelCase = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
    def setUp(self):
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def lowercase_ ( self ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def lowercase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
pass
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowercase_ ( self ) -> int:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
__lowerCamelCase = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = (self.model_tester.min_size,) * 2
__lowerCamelCase = {
'pixel_values': torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
'mask_labels': torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
'class_labels': torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
__lowerCamelCase = self.model_tester.get_config()
__lowerCamelCase = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
__lowerCamelCase = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = self.all_model_classes[1]
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs()
__lowerCamelCase = True
__lowerCamelCase = True
__lowerCamelCase = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
__lowerCamelCase = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
__lowerCamelCase = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
__lowerCamelCase = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__A = 1e-4
def lowerCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase_ ( self ) -> Any:
'''simple docstring'''
__lowerCamelCase = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
__lowerCamelCase = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
__lowerCamelCase = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = prepare_img()
__lowerCamelCase = image_processor(lowerCamelCase__ , return_tensors='pt' ).to(lowerCamelCase__ )
__lowerCamelCase = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 384, 384) )
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
# masks_queries_logits
__lowerCamelCase = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
__lowerCamelCase = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
__lowerCamelCase = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
__lowerCamelCase = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
__lowerCamelCase = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
__lowerCamelCase = self.default_image_processor
__lowerCamelCase = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
__lowerCamelCase = inputs['pixel_values'].to(lowerCamelCase__ )
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['mask_labels']]
__lowerCamelCase = [el.to(lowerCamelCase__ ) for el in inputs['class_labels']]
with torch.no_grad():
__lowerCamelCase = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 348 | 0 |
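# A minimal sketch of the retain_grad pattern the attention-gradient test above
# relies on: gradients of non-leaf tensors are discarded after backward()
# unless they are explicitly retained.
import torch

x = torch.randn(4, 3, requires_grad=True)
hidden = torch.relu(x @ torch.randn(3, 3))  # non-leaf intermediate activation
hidden.retain_grad()                        # without this, hidden.grad stays None
loss = hidden.sum()
loss.backward(retain_graph=True)
assert hidden.grad is not None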
import logging
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments
__snake_case = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 310 |
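# A standalone sketch of how dataclass fields like the ones above become CLI
# flags via transformers.HfArgumentParser (SketchArgs is a hypothetical
# dataclass added here purely for illustration).
from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class SketchArgs:
    label_smoothing: float = field(default=0.0, metadata={"help": "Label smoothing epsilon."})
    adafactor: bool = field(default=False, metadata={"help": "Whether to use Adafactor."})


parser = HfArgumentParser(SketchArgs)
(args,) = parser.parse_args_into_dataclasses(["--label_smoothing", "0.1", "--adafactor"])
print(args.label_smoothing, args.adafactor)  # 0.1 True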
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job title, company name) pairs scraped from Indeed for a location."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 310 | 1 |
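# A small consumption sketch for the generator above: itertools.islice caps
# the number of scraped results lazily instead of materialising them all.
from itertools import islice

for title, company in islice(fetch_jobs("Bangalore"), 5):
    print(f"{title} @ {company}")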
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
| 149 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    """Configuration class to store the configuration of a Swinv2 model."""

    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 149 | 1 |
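# A quick sanity check of the derived attribute above: hidden_size doubles per
# stage, so a 96-dim embedding with four stages ends at 96 * 2**3 = 768.
config = Swinv2Config(embed_dim=96, depths=[2, 2, 6, 2])
assert config.hidden_size == 96 * 2 ** (len(config.depths) - 1) == 768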
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class BridgeTowerVisionConfig(PretrainedConfig):
    """Configuration for the BridgeTower vision encoder."""

    model_type = "bridgetower_vision_model"
def __init__( self: Tuple ,lowerCamelCase_: Optional[int]=768 ,lowerCamelCase_: Any=12 ,lowerCamelCase_: Tuple=3 ,lowerCamelCase_: Optional[Any]=16 ,lowerCamelCase_: Dict=288 ,lowerCamelCase_: Optional[Any]=1 ,lowerCamelCase_: Optional[Any]=1e-05 ,lowerCamelCase_: int=False ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: Union[str, Any]=False ,**lowerCamelCase_: Any ,) -> List[str]:
super().__init__(**a_ )
UpperCAmelCase_ : Tuple = hidden_size
UpperCAmelCase_ : List[Any] = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : Optional[int] = patch_size
UpperCAmelCase_ : Any = image_size
UpperCAmelCase_ : Optional[Any] = initializer_factor
UpperCAmelCase_ : str = layer_norm_eps
UpperCAmelCase_ : Tuple = stop_gradient
UpperCAmelCase_ : Any = share_layernorm
UpperCAmelCase_ : Dict = remove_last_layer
@classmethod
def A__ ( cls: List[Any] ,lowerCamelCase_: Union[str, os.PathLike] ,**lowerCamelCase_: List[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = cls.get_config_dict(a_ ,**a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
UpperCAmelCase_ : int = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(a_ ,**a_ )
class BridgeTowerTextConfig(PretrainedConfig):
    """Configuration for the BridgeTower text encoder."""

    model_type = "bridgetower_text_model"
def __init__( self: Tuple ,lowerCamelCase_: Union[str, Any]=50265 ,lowerCamelCase_: int=768 ,lowerCamelCase_: Optional[Any]=12 ,lowerCamelCase_: Dict=12 ,lowerCamelCase_: int=1 ,lowerCamelCase_: Any=3072 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: Union[str, Any]=0.1 ,lowerCamelCase_: str=514 ,lowerCamelCase_: Dict=1 ,lowerCamelCase_: str=1e-05 ,lowerCamelCase_: Union[str, Any]=1 ,lowerCamelCase_: List[Any]=0 ,lowerCamelCase_: Optional[int]=2 ,lowerCamelCase_: str="absolute" ,lowerCamelCase_: Union[str, Any]=True ,**lowerCamelCase_: int ,) -> Optional[Any]:
super().__init__(**a_ )
UpperCAmelCase_ : List[Any] = vocab_size
UpperCAmelCase_ : Optional[int] = hidden_size
UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
UpperCAmelCase_ : Tuple = num_attention_heads
UpperCAmelCase_ : int = hidden_act
UpperCAmelCase_ : List[str] = initializer_factor
UpperCAmelCase_ : List[Any] = intermediate_size
UpperCAmelCase_ : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
UpperCAmelCase_ : int = max_position_embeddings
UpperCAmelCase_ : int = type_vocab_size
UpperCAmelCase_ : List[Any] = layer_norm_eps
UpperCAmelCase_ : Union[str, Any] = position_embedding_type
UpperCAmelCase_ : List[str] = use_cache
UpperCAmelCase_ : Tuple = pad_token_id
UpperCAmelCase_ : Dict = bos_token_id
UpperCAmelCase_ : Any = eos_token_id
@classmethod
def A__ ( cls: Dict ,lowerCamelCase_: Union[str, os.PathLike] ,**lowerCamelCase_: Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = cls.get_config_dict(a_ ,**a_ )
if config_dict.get("""model_type""" ) == "bridgetower":
UpperCAmelCase_ : List[str] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(a_ ,**a_ )
class BridgeTowerConfig(PretrainedConfig):
    """Configuration combining the BridgeTower text and vision configs."""

    model_type = "bridgetower"
def __init__( self: Union[str, Any] ,lowerCamelCase_: Optional[int]=True ,lowerCamelCase_: List[Any]="gelu" ,lowerCamelCase_: str=768 ,lowerCamelCase_: Dict=1 ,lowerCamelCase_: Optional[int]=1e-05 ,lowerCamelCase_: List[str]=False ,lowerCamelCase_: str="add" ,lowerCamelCase_: List[str]=12 ,lowerCamelCase_: Dict=6 ,lowerCamelCase_: List[Any]=False ,lowerCamelCase_: Optional[int]=False ,lowerCamelCase_: int=None ,lowerCamelCase_: Optional[Any]=None ,**lowerCamelCase_: Any ,) -> Union[str, Any]:
UpperCAmelCase_ : Any = kwargs.pop("""text_config_dict""" ,a_ )
UpperCAmelCase_ : Dict = kwargs.pop("""vision_config_dict""" ,a_ )
super().__init__(**a_ )
UpperCAmelCase_ : Tuple = share_cross_modal_transformer_layers
UpperCAmelCase_ : List[Any] = hidden_act
UpperCAmelCase_ : Dict = hidden_size
UpperCAmelCase_ : Any = initializer_factor
UpperCAmelCase_ : List[Any] = layer_norm_eps
UpperCAmelCase_ : Tuple = share_link_tower_layers
UpperCAmelCase_ : Optional[int] = link_tower_type
UpperCAmelCase_ : Any = num_attention_heads
UpperCAmelCase_ : Optional[Any] = num_hidden_layers
UpperCAmelCase_ : List[str] = tie_word_embeddings
UpperCAmelCase_ : Any = init_layernorm_from_vision_encoder
if text_config is None:
UpperCAmelCase_ : Any = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
UpperCAmelCase_ : Optional[int] = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
UpperCAmelCase_ : int = BridgeTowerTextConfig(**a_ )
UpperCAmelCase_ : List[str] = BridgeTowerVisionConfig(**a_ )
@classmethod
def A__ ( cls: Union[str, Any] ,lowerCamelCase_: BridgeTowerTextConfig ,lowerCamelCase_: BridgeTowerVisionConfig ,**lowerCamelCase_: Dict ) -> Optional[Any]:
return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**a_ )
def A__ ( self: int ) -> Tuple:
UpperCAmelCase_ : List[str] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Dict = self.text_config.to_dict()
UpperCAmelCase_ : List[str] = self.vision_config.to_dict()
UpperCAmelCase_ : List[str] = self.__class__.model_type
return output
| 345 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "yjernite/retribert-base-uncased": (
            "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "yjernite/retribert-base-uncased": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "yjernite/retribert-base-uncased": {"do_lower_case": True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self: int, a_: int=None, a_: Dict=None, a_: Any=True, a_: int="[UNK]", a_: Any="[SEP]", a_: List[Any]="[PAD]", a_: List[Any]="[CLS]", a_: str="[MASK]", a_: Dict=True, a_: Optional[int]=None, **a_: Tuple, ):
'''simple docstring'''
super().__init__(
a_, tokenizer_file=a_, do_lower_case=a_, unk_token=a_, sep_token=a_, pad_token=a_, cls_token=a_, mask_token=a_, tokenize_chinese_chars=a_, strip_accents=a_, **a_, )
_snake_case : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", a_ ) != do_lower_case
or normalizer_state.get("""strip_accents""", a_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", a_ ) != tokenize_chinese_chars
):
_snake_case : Dict = getattr(a_, normalizer_state.pop("""type""" ) )
_snake_case : List[Any] = do_lower_case
_snake_case : List[str] = strip_accents
_snake_case : Tuple = tokenize_chinese_chars
_snake_case : Tuple = normalizer_class(**a_ )
_snake_case : List[str] = do_lower_case
def UpperCamelCase_ ( self: Any, a_: str, a_: Optional[int]=None ):
'''simple docstring'''
_snake_case : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCamelCase_ ( self: List[str], a_: List[int], a_: Optional[List[int]] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = [self.sep_token_id]
_snake_case : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self: Dict, a_: str, a_: Optional[str] = None ):
'''simple docstring'''
_snake_case : Union[str, Any] = self._tokenizer.model.save(a_, name=a_ )
return tuple(a_ )
| 64 | 0 |
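# A minimal, tokenizer-free sketch of the sentence-pair layout the two methods
# above produce for BERT-style models (the ids 101/102 are illustrative only):
# [CLS] A [SEP] B [SEP], with segment id 0 for the first sentence, 1 for the second.
cls_id, sep_id = 101, 102
a_ids, b_ids = [7, 8], [9]
input_ids = [cls_id] + a_ids + [sep_id] + b_ids + [sep_id]
token_type_ids = [0] * (len(a_ids) + 2) + [1] * (len(b_ids) + 1)
assert len(input_ids) == len(token_type_ids) == 7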
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the number of non-pad tokens per example so the datasets can do dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 238 |
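# The core length computation above in isolation: ne(pad).sum(1) counts the
# non-padding tokens in each row of a batch.
import torch

pad = 0
batch = torch.tensor([[5, 6, 0, 0], [7, 8, 9, 0]])
assert batch.ne(pad).sum(1).tolist() == [2, 3]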
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
def __init__(self : Dict , a__ : Tuple=None , a__ : Optional[int]=None , a__ : List[str]=None , a__ : Optional[int]="<s>" , a__ : Optional[Any]="<unk>" , a__ : Any="<pad>" , a__ : List[Any]="<mask>" , a__ : Any="</s>" , **a__ : Tuple , ):
"""simple docstring"""
super().__init__(
a__ , a__ , tokenizer_file=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , sep_token=a__ , **a__ , )
def a (self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.cls_token_id]
__snake_case = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a (self : List[str] , a__ : List[int] , a__ : Optional[List[int]] = None , a__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
if token_ids_a is None:
return [1] + ([0] * len(a__ )) + [1]
return [1] + ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def a (self : Optional[int] , a__ : List[int] , a__ : Optional[List[int]] = None ):
"""simple docstring"""
__snake_case = [self.sep_token_id]
__snake_case = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a (self : int , a__ : str , a__ : Optional[str] = None ):
"""simple docstring"""
__snake_case = self._tokenizer.model.save(a__ , name=a__ )
return tuple(a__ )
| 238 | 1 |
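# A spelled-out sketch of the special-tokens mask above: 1 marks an added
# special token (<s>/</s>), 0 marks a regular sequence token.
a_ids, b_ids = [11, 12], [13]
mask = [1] + [0] * len(a_ids) + [1] + [0] * len(b_ids) + [1]
assert mask == [1, 0, 0, 1, 0, 1]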
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the translational kinetic energy 0.5 * m * |v|^2 of a body."""
    if mass < 0:
        raise ValueError("""The mass of a body cannot be negative""")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 317 |
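# Quick checks for kinetic_energy (SI units assumed): only the speed matters,
# so the sign of the velocity is irrelevant.
assert kinetic_energy(10, 10) == 500.0
assert kinetic_energy(2, -5) == kinetic_energy(2, 5) == 25.0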
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
"""simple docstring"""
def __init__( self , A , A=7 , A=3 , A=3_0 , A=4_0_0 , A=True , A=None , A=0.9 , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , ) -> Dict:
snake_case : Optional[int] = size if size is not None else {"""shortest_edge""": 3_0}
snake_case : Optional[int] = crop_size if crop_size is not None else {"""height""": 3_0, """width""": 3_0}
snake_case : int = parent
snake_case : List[str] = batch_size
snake_case : Any = num_channels
snake_case : Optional[Any] = min_resolution
snake_case : Any = max_resolution
snake_case : Dict = do_resize_and_center_crop
snake_case : Any = size
snake_case : List[Any] = crop_pct
snake_case : int = crop_size
snake_case : int = do_normalize
snake_case : List[Any] = image_mean
snake_case : Tuple = image_std
def UpperCAmelCase ( self ) -> int:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : str = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase ( self ) -> Dict:
snake_case : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(A , """size""" ) )
self.assertTrue(hasattr(A , """crop_pct""" ) )
self.assertTrue(hasattr(A , """do_normalize""" ) )
self.assertTrue(hasattr(A , """image_mean""" ) )
self.assertTrue(hasattr(A , """image_std""" ) )
def UpperCAmelCase ( self ) -> int:
snake_case : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 3_0} )
self.assertEqual(image_processor.crop_size , {"""height""": 3_0, """width""": 3_0} )
snake_case : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def UpperCAmelCase ( self ) -> Tuple:
pass
def UpperCAmelCase ( self ) -> List[Any]:
# Initialize image_processing
snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
snake_case : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case : Tuple = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase ( self ) -> Dict:
# Initialize image_processing
snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
snake_case : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case : Any = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def UpperCAmelCase ( self ) -> List[str]:
# Initialize image_processing
snake_case : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case : int = image_processing(A , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 124 | 0 |
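# A sketch of the crop_pct idea these processor tests exercise: resize so that
# a centre crop of the target size covers crop_pct of the shorter edge.
def resize_for_crop(shortest_edge: int, crop_pct: float) -> int:
    return int(shortest_edge / crop_pct)  # e.g. a 224 crop at 0.9 resizes to 248 first


assert resize_for_crop(224, 0.9) == 248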
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
def lowercase__ ( self, lowerCAmelCase=0 ):
"""simple docstring"""
lowerCamelCase_ =floats_tensor((1, 3, 128, 128), rng=random.Random(SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase_ =torch.manual_seed(SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ =self.get_dummy_inputs()
lowerCamelCase_ =pipe(**SCREAMING_SNAKE_CASE_ ).images
lowerCamelCase_ =image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
lowerCamelCase_ =PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ =self.get_dummy_inputs()
lowerCamelCase_ =pipe(**SCREAMING_SNAKE_CASE_ ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
lowerCamelCase_ =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ =self.get_dummy_inputs()
lowerCamelCase_ =pipe(**SCREAMING_SNAKE_CASE_ ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
lowerCamelCase_ =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ =self.get_dummy_inputs()
lowerCamelCase_ =pipe(**SCREAMING_SNAKE_CASE_ ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
lowerCamelCase_ =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ =self.get_dummy_inputs()
lowerCamelCase_ =pipe(**SCREAMING_SNAKE_CASE_ ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
@property
def lowercase__ ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =ort.SessionOptions()
lowerCamelCase_ =False
return options
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ =init_image.resize((128, 128) )
# using the PNDM scheduler by default
lowerCamelCase_ =OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''', provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ ='A fantasy landscape, trending on artstation'
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =pipe(
prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, guidance_scale=7.5, num_inference_steps=10, generator=SCREAMING_SNAKE_CASE_, output_type='''np''', )
lowerCamelCase_ =output.images
lowerCamelCase_ =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def lowercase__ ( self ):
"""simple docstring"""
lowerCamelCase_ =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ =init_image.resize((128, 128) )
lowerCamelCase_ =LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''', subfolder='''scheduler''' )
lowerCamelCase_ =OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''', scheduler=SCREAMING_SNAKE_CASE_, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
lowerCamelCase_ ='A fantasy landscape, trending on artstation'
lowerCamelCase_ =torch.manual_seed(0 )
lowerCamelCase_ =pipe(
prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, guidance_scale=7.5, num_inference_steps=20, generator=SCREAMING_SNAKE_CASE_, output_type='''np''', )
lowerCamelCase_ =output.images
lowerCamelCase_ =images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCamelCase_ =np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 364 |
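# The scheduler-swapping pattern used throughout the tests above, in
# isolation: any diffusers scheduler can be rebuilt from another scheduler's
# config via from_config.
from diffusers import DDPMScheduler, DPMSolverMultistepScheduler

base = DDPMScheduler(num_train_timesteps=1000)
swapped = DPMSolverMultistepScheduler.from_config(base.config)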
'''simple docstring'''
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
def __init__( self, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''', lowerCAmelCase, )
lowerCamelCase_ =kwargs.pop('''feature_extractor''' )
lowerCamelCase_ =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(lowerCAmelCase, lowerCAmelCase )
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def __call__( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''images''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''text''', lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
lowerCamelCase_ =args[0]
lowerCamelCase_ =args[1:]
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
lowerCamelCase_ =self.image_processor(lowerCAmelCase, *lowerCAmelCase, **lowerCAmelCase )
if text is not None:
lowerCamelCase_ =self.tokenizer(lowerCAmelCase, **lowerCAmelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
lowerCamelCase_ =encodings['''input_ids''']
return inputs
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase, **lowerCAmelCase )
def lowercase__ ( self, *lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase, **lowerCAmelCase )
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your images inputs, or in a separate call.''' )
lowerCamelCase_ =True
lowerCamelCase_ =self.tokenizer
yield
lowerCamelCase_ =self.image_processor
lowerCamelCase_ =False
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=False, lowerCAmelCase=None ):
"""simple docstring"""
if added_vocab is None:
lowerCamelCase_ =self.tokenizer.get_added_vocab()
lowerCamelCase_ ={}
while tokens:
lowerCamelCase_ =re.search(R'''<s_(.*?)>''', lowerCAmelCase, re.IGNORECASE )
if start_token is None:
break
lowerCamelCase_ =start_token.group(1 )
lowerCamelCase_ =re.search(Rf'''</s_{key}>''', lowerCAmelCase, re.IGNORECASE )
lowerCamelCase_ =start_token.group()
if end_token is None:
lowerCamelCase_ =tokens.replace(lowerCAmelCase, '''''' )
else:
lowerCamelCase_ =end_token.group()
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.escape(lowerCAmelCase )
lowerCamelCase_ =re.search(f'''{start_token_escaped}(.*?){end_token_escaped}''', lowerCAmelCase, re.IGNORECASE )
if content is not None:
lowerCamelCase_ =content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
lowerCamelCase_ =self.tokenajson(lowerCAmelCase, is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if value:
if len(lowerCAmelCase ) == 1:
lowerCamelCase_ =value[0]
lowerCamelCase_ =value
else: # leaf nodes
lowerCamelCase_ =[]
for leaf in content.split(R'''<sep/>''' ):
lowerCamelCase_ =leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
lowerCamelCase_ =leaf[1:-2] # for categorical special tokens
output[key].append(lowerCAmelCase )
if len(output[key] ) == 1:
lowerCamelCase_ =output[key][0]
lowerCamelCase_ =tokens[tokens.find(lowerCAmelCase ) + len(lowerCAmelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:], is_inner_value=lowerCAmelCase, added_vocab=lowerCAmelCase )
if len(lowerCAmelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''', lowerCAmelCase, )
return self.image_processor_class
@property
def lowercase__ ( self ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''', lowerCAmelCase, )
return self.image_processor
| 6 | 0 |
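# A stripped-down sketch of the <s_key>...</s_key> parsing that token2json
# above performs, handling a single flat field only (the real method recurses
# through nested keys and <sep/> lists).
import re


def parse_flat_field(tokens: str) -> dict:
    match = re.search(r"<s_(.*?)>(.*?)</s_\1>", tokens)
    return {match.group(1): match.group(2).strip()} if match else {}


assert parse_flat_field("<s_total>9.99</s_total>") == {"total": "9.99"}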
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"

_logger = None


def logger():
    """Returns the logger instance used throughout the module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger
class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    """A proxy returned by acquire() so that ``with lock.acquire():`` works."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()
                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses msvcrt.locking() to hard-lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_lowercase =os.open(self._lock_file , UpperCAmelCase )
except OSError:
pass
else:
try:
msvcrt.locking(UpperCAmelCase , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(UpperCAmelCase )
else:
_lowercase =fd
return None
def __A (self ) -> Optional[int]:
_lowercase =self._lock_file_fd
_lowercase =None
msvcrt.locking(UpperCAmelCase , msvcrt.LK_UNLCK , 1 )
os.close(UpperCAmelCase )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCamelCase__ ( lowerCAmelCase):
def __init__(self , UpperCAmelCase , UpperCAmelCase=-1 , UpperCAmelCase=None ) -> Tuple:
_lowercase =os.statvfs(os.path.dirname(UpperCAmelCase ) ).f_namemax
super().__init__(UpperCAmelCase , timeout=UpperCAmelCase , max_filename_length=UpperCAmelCase )
def __A (self ) -> int:
_lowercase =os.O_RDWR | os.O_CREAT | os.O_TRUNC
_lowercase =os.open(self._lock_file , UpperCAmelCase )
try:
fcntl.flock(UpperCAmelCase , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(UpperCAmelCase )
else:
_lowercase =fd
return None
def __A (self ) -> Any:
# Do not remove the lockfile:
#
# https://github.com/benediktschmitt/py-filelock/issues/31
# https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
_lowercase =self._lock_file_fd
_lowercase =None
fcntl.flock(UpperCAmelCase , fcntl.LOCK_UN )
os.close(UpperCAmelCase )
return None
class lowerCamelCase__ ( lowerCAmelCase):
def __A (self ) -> Dict:
_lowercase =os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_lowercase =os.open(self._lock_file , UpperCAmelCase )
except OSError:
pass
else:
_lowercase =fd
return None
def __A (self ) -> Any:
os.close(self._lock_file_fd )
_lowercase =None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
UpperCAmelCase__ = None
if msvcrt:
UpperCAmelCase__ = WindowsFileLock
elif fcntl:
UpperCAmelCase__ = UnixFileLock
else:
UpperCAmelCase__ = SoftFileLock
if warnings is not None:
warnings.warn('''only soft file lock is available''')
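# A hedged usage sketch of the lock API above. Released py-filelock exports
# `FileLock` and `Timeout` under those names; in this cell the alias
# assignments are name-mangled, so the demo is guarded. Path and timeout are
# illustrative.
if "FileLock" in globals() and "Timeout" in globals():
    demo_lock = FileLock("/tmp/shared.txt.lock", timeout=5)
    try:
        with demo_lock:  # __enter__ -> acquire(), __exit__ -> release()
            pass  # safe to touch the shared resource while the lock is held
    except Timeout:
        print("another process holds the lock")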
| 5 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
__UpperCamelCase = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {f'''funnel-transformer/{name}''': 512 for name in _model_names}
__UpperCamelCase = {f'''funnel-transformer/{name}''': {'''do_lower_case''': True} for name in _model_names}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ : List[Any] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ : str = FunnelTokenizer
SCREAMING_SNAKE_CASE_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ : int = 2
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<sep>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<cls>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__="##" , **lowerCAmelCase__ , ) -> Tuple:
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , clean_text=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , wordpieces_prefix=lowerCAmelCase__ , **lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , lowerCAmelCase__ ) != do_lower_case
or normalizer_state.get('strip_accents' , lowerCAmelCase__ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , lowerCAmelCase__ ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE = getattr(lowerCAmelCase__ , normalizer_state.pop('type' ) )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = strip_accents
SCREAMING_SNAKE_CASE = tokenize_chinese_chars
SCREAMING_SNAKE_CASE = normalizer_class(**lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = do_lower_case
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Optional[Any]:
SCREAMING_SNAKE_CASE = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE = self._tokenizer.model.save(lowerCAmelCase__ , name=lowerCAmelCase__ )
return tuple(lowerCAmelCase__ )
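# A small worked example of the token-type layout produced by the method
# above: the [CLS] slot gets its own type id (2) in Funnel. All ids below
# are invented for illustration.
cls_ids, sep_ids = [101], [102]
seq_a, seq_b = [10, 11, 12], [20, 21]
demo_type_ids = (
    len(cls_ids) * [2]                 # cls_token_type_id
    + len(seq_a + sep_ids) * [0]
    + len(seq_b + sep_ids) * [1]
)
print(demo_type_ids)  # [2, 0, 0, 0, 0, 1, 1, 1]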
| 113 | 0 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
lowercase__ : Dict = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def UpperCamelCase_ ( ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase_ : int = Github(os.environ['GITHUB_TOKEN'] )
lowerCAmelCase_ : Any = g.get_repo('huggingface/transformers' )
lowerCAmelCase_ : Union[str, Any] = repo.get_issues(state='open' )
for issue in open_issues:
lowerCAmelCase_ : str = sorted([comment for comment in issue.get_comments()] , key=lambda lowerCAmelCase__ : i.created_at , reverse=lowerCAmelCase__ )
lowerCAmelCase_ : Dict = comments[0] if len(lowerCAmelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
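# A quick hand-check of the two inactivity windows used above (dates invented).
from datetime import datetime, timedelta
_now = datetime.utcnow()
_updated = _now - timedelta(days=10)
_created = _now - timedelta(days=40)
print((_now - _updated).days > 7 and (_now - _created).days >= 30)  # True -> would be closed
print((_now - _updated).days > 23)                                  # False -> no stale comment yet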
| 364 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class UpperCamelCase__ :
"""simple docstring"""
pass
| 289 | 0 |
'''simple docstring'''
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
def create_vector ( end_pointa : Pointad , end_pointb : Pointad ):
    '''simple docstring'''
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross ( ab : Vectorad , ac : Vectorad ):
    '''simple docstring'''
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector ( vector : Vectorad , accuracy : int ):
    '''simple docstring'''
    return tuple(round(x , accuracy ) for x in vector ) == (0, 0, 0)
def are_collinear ( a : Pointad , b : Pointad , c : Pointad , accuracy : int = 10 ):
    '''simple docstring'''
    ab = create_vector(a , b )
    ac = create_vector(a , c )
    return is_zero_vector(get_ad_vectors_cross(ab , ac ) , accuracy )
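# Two hand-checked cases for the helpers above (points invented): points on
# the line x = y = z give a zero cross product, hence True.
print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True
print(are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3)))  # False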
| 346 |
'''simple docstring'''
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome_traversal ( s : str ):
    '''simple docstring'''
    start_i = 0
    end_i = len(s ) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome ( s : str ):
    '''simple docstring'''
    mid = len(s ) // 2
    n = len(s )
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(mid ) )
def is_palindrome_recursive ( s : str ):
    '''simple docstring'''
    if len(s ) <= 1:
        return True
    if s[0] == s[len(s ) - 1]:
        return is_palindrome_recursive(s[1:-1] )
    else:
        return False
def is_palindrome_slice ( s : str ):
    '''simple docstring'''
    return s == s[::-1]
def benchmark_function ( name : str ):
    '''simple docstring'''
    stmt = F'''all({name}(key) is value for key, value in test_data.items())'''
    setup = F'''from __main__ import test_data, {name}'''
    number = 500000
    result = timeit(stmt=stmt , setup=setup , number=number )
    print(F'''{name:<35} finished {number:,} runs in {result:.5f} seconds''' )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f"{key:21} {value}")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
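# The same timeit pattern used by benchmark_function above, on a standalone
# statement (statement and run count are illustrative).
from timeit import timeit
print(timeit(stmt="'abcba' == 'abcba'[::-1]", number=100_000))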
| 346 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __UpperCAmelCase ( a_: Any, a_: List[Any]=False ):
try:
_UpperCAmelCase : Any = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase : Tuple = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase : List[str] = strtobool(a_ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"""If set, {key} must be yes or no.""" )
return _value
__a = parse_flag_from_env('RUN_SLOW', default=False)
def __UpperCAmelCase ( a_: Any ):
return unittest.skip("Test was skipped" )(a_ )
def __UpperCAmelCase ( a_: Dict ):
return unittest.skipUnless(_run_slow_tests, "test is slow" )(a_ )
def __UpperCAmelCase ( a_: Union[str, Any] ):
return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU" )(a_ )
def __UpperCAmelCase ( a_: List[Any] ):
return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU" )(a_ )
def __UpperCAmelCase ( a_: Any ):
return unittest.skipUnless(is_xpu_available(), "test requires a XPU" )(a_ )
def __UpperCAmelCase ( a_: List[Any] ):
return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`" )(a_ )
def __UpperCAmelCase ( a_: Tuple ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite" )(a_ )
def __UpperCAmelCase ( a_: Any ):
return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library" )(a_ )
def __UpperCAmelCase ( a_: Dict ):
return unittest.skipUnless(is_tpu_available(), "test requires TPU" )(a_ )
def __UpperCAmelCase ( a_: int ):
return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU" )(a_ )
def __UpperCAmelCase ( a_: Union[str, Any] ):
return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU" )(a_ )
def __UpperCAmelCase ( a_: Tuple ):
return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs" )(a_ )
def __UpperCAmelCase ( a_: Union[str, Any] ):
return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs" )(a_ )
def __UpperCAmelCase ( a_: Union[str, Any] ):
return unittest.skipUnless(is_safetensors_available(), "test requires safetensors" )(a_ )
def __UpperCAmelCase ( a_: Dict ):
return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed" )(a_ )
def __UpperCAmelCase ( a_: Tuple ):
return unittest.skipUnless(is_torch_version(">=", "1.12.0" ), "test requires torch version >= 1.12.0" )(a_ )
def __UpperCAmelCase ( a_: int=None, a_: Union[str, Any]=None ):
if test_case is None:
return partial(a_, version=a_ )
return unittest.skipUnless(is_torch_version(">=", a_ ), f"""test requires torch version >= {version}""" )(a_ )
def __UpperCAmelCase ( a_: Dict ):
return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard" )(a_ )
def __UpperCAmelCase ( a_: int ):
return unittest.skipUnless(is_wandb_available(), "test requires wandb" )(a_ )
def __UpperCAmelCase ( a_: Optional[Any] ):
return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml" )(a_ )
__a = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __UpperCAmelCase ( a_: Dict ):
return unittest.skipUnless(
_atleast_one_tracker_available, "test requires at least one tracker to be available and for `comet_ml` to not be installed", )(a_ )
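# A self-contained sketch of the environment-flag pattern implemented by the
# first helper above; the flag name and value are invented.
import os
from distutils.util import strtobool

def _demo_parse_flag(key, default=False):
    try:
        return bool(strtobool(os.environ[key]))
    except KeyError:
        return default

os.environ["MY_DEMO_FLAG"] = "yes"
print(_demo_parse_flag("MY_DEMO_FLAG"))  # True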
class A__ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = True
@classmethod
def _lowerCAmelCase ( cls : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = tempfile.mkdtemp()
@classmethod
def _lowerCAmelCase ( cls : int ) -> Tuple:
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(lowerCAmelCase__ )
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Union[mock.Mock, List[mock.Mock]] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Dict = mocks if isinstance(lowerCAmelCase__ , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __UpperCAmelCase ( a_: Any ):
_UpperCAmelCase : List[Any] = AcceleratorState()
_UpperCAmelCase : List[Any] = tensor[None].clone().to(state.device )
_UpperCAmelCase : int = gather(a_ ).cpu()
_UpperCAmelCase : List[Any] = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i], a_ ):
return False
return True
class A__ :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = returncode
_UpperCAmelCase : Optional[int] = stdout
_UpperCAmelCase : str = stderr
async def __UpperCAmelCase ( a_: Union[str, Any], a_: List[Any] ):
while True:
_UpperCAmelCase : List[str] = await stream.readline()
if line:
callback(a_ )
else:
break
async def __UpperCAmelCase ( a_: Optional[Any], a_: Dict=None, a_: Dict=None, a_: List[Any]=None, a_: Optional[Any]=False, a_: Optional[Any]=False ):
if echo:
print("\nRunning: ", " ".join(a_ ) )
_UpperCAmelCase : Union[str, Any] = await asyncio.create_subprocess_exec(
cmd[0], *cmd[1:], stdin=a_, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=a_, )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : List[str] = []
def tee(a_: Union[str, Any], a_: str, a_: Optional[Any], a_: Union[str, Any]="" ):
_UpperCAmelCase : Any = line.decode("utf-8" ).rstrip()
sink.append(a_ )
if not quiet:
print(a_, a_, file=a_ )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout, lambda a_ : tee(a_, a_, sys.stdout, label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr, lambda a_ : tee(a_, a_, sys.stderr, label="stderr:" ) ) ),
], timeout=a_, )
return _RunOutput(await p.wait(), a_, a_ )
def __UpperCAmelCase ( a_: Optional[Any], a_: Optional[Any]=None, a_: int=None, a_: Any=180, a_: Optional[int]=False, a_: int=True ):
_UpperCAmelCase : Union[str, Any] = asyncio.get_event_loop()
_UpperCAmelCase : Union[str, Any] = loop.run_until_complete(
_stream_subprocess(a_, env=a_, stdin=a_, timeout=a_, quiet=a_, echo=a_ ) )
_UpperCAmelCase : Tuple = " ".join(a_ )
if result.returncode > 0:
_UpperCAmelCase : Any = "\n".join(result.stderr )
raise RuntimeError(
f"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
f"""The combined stderr from workers follows:\n{stderr}""" )
return result
class A__ ( UpperCamelCase ):
"""simple docstring"""
pass
def __UpperCAmelCase ( a_: List[str], a_: str=False ):
try:
_UpperCAmelCase : List[str] = subprocess.check_output(a_, stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(a_, "decode" ):
_UpperCAmelCase : List[Any] = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f"""Command `{' '.join(a_ )}` failed with the following error:\n\n{e.output.decode()}""" ) from e | 17 | '''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
__a = (3, 9, -11, 0, 7, 5, 1, -1)
__a = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class A__ :
"""simple docstring"""
UpperCamelCase_ : int
UpperCamelCase_ : Node | None
class A__ :
"""simple docstring"""
def __init__( self : Dict , lowerCAmelCase__ : Iterable[int] ) -> None:
"""simple docstring"""
_UpperCAmelCase : Node | None = None
for i in sorted(lowerCAmelCase__ , reverse=lowerCAmelCase__ ):
_UpperCAmelCase : str = Node(lowerCAmelCase__ , self.head )
def __iter__( self : int ) -> Iterator[int]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.head
while node:
yield node.data
_UpperCAmelCase : List[str] = node.next_node
def __len__( self : Any ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __str__( self : Union[str, Any] ) -> str:
"""simple docstring"""
return " -> ".join([str(lowerCAmelCase__ ) for node in self] )
def __UpperCAmelCase ( a_: SortedLinkedList, a_: SortedLinkedList ):
return SortedLinkedList(list(a_ ) + list(a_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
__a = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even))) | 17 | 1 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
A : int = logging.get_logger(__name__)
A : Optional[int] = {
"tensor(bool)": np.bool_,
"tensor(int8)": np.inta,
"tensor(uint8)": np.uinta,
"tensor(int16)": np.intaa,
"tensor(uint16)": np.uintaa,
"tensor(int32)": np.intaa,
"tensor(uint32)": np.uintaa,
"tensor(int64)": np.intaa,
"tensor(uint64)": np.uintaa,
"tensor(float16)": np.floataa,
"tensor(float)": np.floataa,
"tensor(double)": np.floataa,
}
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a=None , **__a ):
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
__lowerCAmelCase = model
__lowerCAmelCase = kwargs.get("model_save_dir" , __a )
__lowerCAmelCase = kwargs.get("latest_model_name" , __a )
def __call__( self , **__a ):
__lowerCAmelCase = {k: np.array(__a ) for k, v in kwargs.items()}
return self.model.run(__a , __a )
@staticmethod
def snake_case ( __a , __a=None , __a=None ):
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
__lowerCAmelCase = "CPUExecutionProvider"
return ort.InferenceSession(__a , providers=[provider] , sess_options=__a )
def snake_case ( self , __a , __a = None , **__a ):
__lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
__lowerCAmelCase = self.model_save_dir.joinpath(self.latest_model_name )
__lowerCAmelCase = Path(__a ).joinpath(__a )
try:
shutil.copyfile(__a , __a )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
__lowerCAmelCase = self.model_save_dir.joinpath(__a )
if src_path.exists():
__lowerCAmelCase = Path(__a ).joinpath(__a )
try:
shutil.copyfile(__a , __a )
except shutil.SameFileError:
pass
def snake_case ( self , __a , **__a , ):
if os.path.isfile(__a ):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
return
os.makedirs(__a , exist_ok=__a )
# saving model weights/files
self._save_pretrained(__a , **__a )
@classmethod
def snake_case ( cls , __a , __a = None , __a = None , __a = False , __a = None , __a = None , __a = None , __a = None , **__a , ):
__lowerCAmelCase = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(__a ):
__lowerCAmelCase = OnnxRuntimeModel.load_model(
os.path.join(__a , __a ) , provider=__a , sess_options=__a )
__lowerCAmelCase = Path(__a )
# load model from hub
else:
# download model
__lowerCAmelCase = hf_hub_download(
repo_id=__a , filename=__a , use_auth_token=__a , revision=__a , cache_dir=__a , force_download=__a , )
__lowerCAmelCase = Path(__a ).parent
__lowerCAmelCase = Path(__a ).name
__lowerCAmelCase = OnnxRuntimeModel.load_model(__a , provider=__a , sess_options=__a )
return cls(model=__a , **__a )
@classmethod
def snake_case ( cls , __a , __a = True , __a = None , __a = None , **__a , ):
__lowerCAmelCase = None
if len(str(__a ).split("@" ) ) == 2:
__lowerCAmelCase , __lowerCAmelCase = model_id.split("@" )
return cls._from_pretrained(
model_id=__a , revision=__a , cache_dir=__a , force_download=__a , use_auth_token=__a , **__a , )
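# A hedged usage sketch of the wrapper above; repo id, file layout and input
# name are illustrative and depend on the exported ONNX graph:
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype="float32"))
# __call__ converts keyword arguments to numpy arrays and runs the session.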
| 57 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _a ( unittest.TestCase):
def __init__( self : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str=13 , _SCREAMING_SNAKE_CASE : List[str]=7 , _SCREAMING_SNAKE_CASE : List[Any]=True , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Optional[int]=True , _SCREAMING_SNAKE_CASE : Union[str, Any]=True , _SCREAMING_SNAKE_CASE : Optional[int]=99 , _SCREAMING_SNAKE_CASE : Optional[int]=32 , _SCREAMING_SNAKE_CASE : Union[str, Any]=5 , _SCREAMING_SNAKE_CASE : Optional[int]=4 , _SCREAMING_SNAKE_CASE : Optional[int]=37 , _SCREAMING_SNAKE_CASE : Any="gelu" , _SCREAMING_SNAKE_CASE : Tuple=0.1 , _SCREAMING_SNAKE_CASE : Any=0.1 , _SCREAMING_SNAKE_CASE : Tuple=512 , _SCREAMING_SNAKE_CASE : Optional[int]=16 , _SCREAMING_SNAKE_CASE : Optional[int]=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE : Tuple=4 , )-> Optional[int]:
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : List[Any] = seq_length
lowerCAmelCase__ : Any = is_training
lowerCAmelCase__ : str = use_attention_mask
lowerCAmelCase__ : Union[str, Any] = use_token_type_ids
lowerCAmelCase__ : List[Any] = use_labels
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : Optional[Any] = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : List[Any] = intermediate_size
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : List[str] = type_vocab_size
lowerCAmelCase__ : Union[str, Any] = type_sequence_label_size
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : int = num_choices
def UpperCAmelCase__( self : List[str] )-> Optional[Any]:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_attention_mask:
lowerCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_SCREAMING_SNAKE_CASE , )
return config, input_ids, attention_mask
def UpperCAmelCase__( self : Dict )-> Union[str, Any]:
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = config_and_inputs
lowerCAmelCase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class _a ( _lowercase , unittest.TestCase):
_a : str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__( self : str )-> List[str]:
lowerCAmelCase__ : Tuple = FlaxDistilBertModelTester(self )
@slow
def UpperCAmelCase__( self : Dict )-> Tuple:
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : Tuple = model_class_name.from_pretrained('''distilbert-base-uncased''' )
lowerCAmelCase__ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_flax
class _a ( unittest.TestCase):
@slow
def UpperCAmelCase__( self : Optional[int] )-> List[Any]:
lowerCAmelCase__ : int = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowerCAmelCase__ : Tuple = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCAmelCase__ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase__ : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
lowerCAmelCase__ : str = (1, 11, 768)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
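# A hedged sketch of the integration pattern exercised above; it needs flax
# plus network access to the distilbert-base-uncased weights:
#   model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
#   out = model(np.ones((1, 1), dtype="i4"))[0]   # shape (batch, seq, hidden)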
| 131 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
def wrapper(*_lowercase , **_lowercase ):
UpperCAmelCase_ : Optional[int] = timeit.default_timer()
UpperCAmelCase_ : Any = func(*_lowercase , **_lowercase )
UpperCAmelCase_ : Union[str, Any] = timeit.default_timer() - starttime
return delta
UpperCAmelCase_ : List[Any] = func.__name__
return wrapper
def lowerCamelCase__ ( _lowercase , _lowercase=100 , _lowercase=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ : Union[str, Any] = seq_shapes or {}
for i in range(_lowercase ):
UpperCAmelCase_ : Union[str, Any] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_lowercase , _ArrayXD ):
UpperCAmelCase_ : List[Any] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_lowercase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase_ : str = '''The small grey turtle was surprisingly fast when challenged.'''
else:
UpperCAmelCase_ : Union[str, Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(_lowercase , datasets.Sequence ):
while isinstance(_lowercase , datasets.Sequence ):
UpperCAmelCase_ : Union[str, Any] = v.feature
UpperCAmelCase_ : str = seq_shapes[k]
UpperCAmelCase_ : List[Any] = np.random.rand(*_lowercase ).astype(v.dtype )
UpperCAmelCase_ : Any = data
dummy_data.append((i, example) )
return dummy_data
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=100 , _lowercase=None ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = generate_examples(_lowercase , num_examples=_lowercase , seq_shapes=_lowercase )
with ArrowWriter(features=_lowercase , path=_lowercase ) as writer:
for key, record in dummy_data:
UpperCAmelCase_ : List[str] = features.encode_example(_lowercase )
writer.write(_lowercase )
UpperCAmelCase_ : List[Any] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f'''Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.''' )
UpperCAmelCase_ : List[str] = datasets.Dataset.from_file(filename=_lowercase , info=datasets.DatasetInfo(features=_lowercase ) )
    return dataset
| 358 |
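# A hedged example of driving the generator above with a tiny schema (feature
# names invented). With the original, un-mangled names this would read:
#   features = datasets.Features({"text": datasets.Value("string"),
#                                 "label": datasets.Value("int32")})
#   rows = generate_examples(features, num_examples=2)
# yielding (index, example) pairs with a fixed sentence for string values and
# a random int for integer values.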
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__a = None
__a = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__a = [
np.dtype('|b1'),
np.dtype('|u1'),
np.dtype('<u2'),
np.dtype('>u2'),
np.dtype('<i2'),
np.dtype('>i2'),
np.dtype('<u4'),
np.dtype('>u4'),
np.dtype('<i4'),
np.dtype('>i4'),
np.dtype('<f4'),
np.dtype('>f4'),
np.dtype('<f8'),
np.dtype('>f8'),
]
@dataclass
class __a:
"""simple docstring"""
lowerCAmelCase = True
lowerCAmelCase = None
# Automatically constructed
lowerCAmelCase = "PIL.Image.Image"
lowerCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
lowerCAmelCase = field(default='''Image''' , init=_a , repr=_a )
def __call__( self ) -> Optional[Any]:
return self.pa_type
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[int] = np.array(_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": value, "bytes": None}
elif isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
return {"path": None, "bytes": value}
elif isinstance(_SCREAMING_SNAKE_CASE ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_SCREAMING_SNAKE_CASE )
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=None ) -> "PIL.Image.Image":
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Image(decode=True) instead.''' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support decoding images, please install \'Pillow\'.''' )
if token_per_repo_id is None:
UpperCAmelCase_ : str = {}
UpperCAmelCase_, UpperCAmelCase_ : List[str] = value['''path'''], value['''bytes''']
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : int = PIL.Image.open(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : int = path.split('''::''' )[-1]
try:
UpperCAmelCase_ : str = string_to_dict(_SCREAMING_SNAKE_CASE ,config.HUB_DATASETS_URL )['''repo_id''']
UpperCAmelCase_ : Optional[Any] = token_per_repo_id.get(_SCREAMING_SNAKE_CASE )
except ValueError:
UpperCAmelCase_ : Any = None
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ,use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
UpperCAmelCase_ : List[str] = BytesIO(f.read() )
UpperCAmelCase_ : Optional[int] = PIL.Image.open(bytes_ )
else:
UpperCAmelCase_ : Dict = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def a__ ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
if pa.types.is_string(storage.type ):
UpperCAmelCase_ : Union[str, Any] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
UpperCAmelCase_ : int = pa.StructArray.from_arrays([bytes_array, storage] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCAmelCase_ : Tuple = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : int = pa.StructArray.from_arrays([storage, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
UpperCAmelCase_ : str = storage.field('''bytes''' )
else:
UpperCAmelCase_ : Union[str, Any] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
UpperCAmelCase_ : List[str] = storage.field('''path''' )
else:
UpperCAmelCase_ : List[Any] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Any = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCAmelCase_ : Optional[Any] = pa.array(
[encode_np_array(np.array(_SCREAMING_SNAKE_CASE ) )['''bytes'''] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
UpperCAmelCase_ : Any = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) ,type=pa.string() )
UpperCAmelCase_ : Any = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> pa.StructArray:
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE ,'''rb''' ) as f:
UpperCAmelCase_ : List[Any] = f.read()
return bytes_
UpperCAmelCase_ : Dict = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
UpperCAmelCase_ : Union[str, Any] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] ,type=pa.string() ,)
UpperCAmelCase_ : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] ,['''bytes''', '''path'''] ,mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE ,self.pa_type )
def lowerCamelCase__ ( ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCAmelCase_ : Optional[Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
UpperCAmelCase_ : List[str] = image.format
else:
UpperCAmelCase_ : int = '''PNG''' if image.mode in ['''1''', '''L''', '''LA''', '''RGB''', '''RGBA'''] else '''TIFF'''
image.save(_lowercase , format=_lowercase )
return buffer.getvalue()
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if hasattr(_lowercase , '''filename''' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
UpperCAmelCase_ : int = array.dtype
UpperCAmelCase_ : Optional[Any] = dtype.byteorder if dtype.byteorder != '''=''' else _NATIVE_BYTEORDER
UpperCAmelCase_ : str = dtype.kind
UpperCAmelCase_ : int = dtype.itemsize
UpperCAmelCase_ : Optional[int] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCAmelCase_ : str = np.dtype('''|u1''' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCAmelCase_ : Dict = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCAmelCase_ : Optional[int] = dtype_byteorder + dtype_kind + str(_lowercase )
UpperCAmelCase_ : Dict = np.dtype(_lowercase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
UpperCAmelCase_ : Dict = PIL.Image.fromarray(array.astype(_lowercase ) )
return {"path": None, "bytes": image_to_bytes(_lowercase )}
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('''To support encoding images, please install \'Pillow\'.''' )
if objs:
UpperCAmelCase_, UpperCAmelCase_ : List[Any] = first_non_null_value(_lowercase )
if isinstance(_lowercase , _lowercase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_lowercase , np.ndarray ):
UpperCAmelCase_ : List[str] = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
elif isinstance(_lowercase , PIL.Image.Image ):
UpperCAmelCase_ : Union[str, Any] = no_op_if_value_is_null(_lowercase )
return [obj_to_image_dict_func(_lowercase ) for obj in objs]
else:
return objs
else:
        return objs
| 235 | 0 |
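# A hedged end-to-end sketch of the Image feature above (the file path is
# illustrative; decoding requires Pillow):
#   from datasets import Dataset, Features, Image
#   ds = Dataset.from_dict({"image": ["path/to/a.png"]},
#                          features=Features({"image": Image()}))
#   ds[0]["image"]   # lazily decoded into a PIL.Image.Image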
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : Union[str, Any] = {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"""
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : List[Any] ='speech_to_text_2'
lowercase : Tuple =['past_key_values']
lowercase : Optional[int] ={'num_attention_heads': 'decoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self, lowerCAmelCase=10_000, lowerCAmelCase=6, lowerCAmelCase=2_048, lowerCAmelCase=4, lowerCAmelCase=0.0, lowerCAmelCase=True, lowerCAmelCase="relu", lowerCAmelCase=256, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=2, lowerCAmelCase=True, lowerCAmelCase=1, lowerCAmelCase=0, lowerCAmelCase=2, lowerCAmelCase=1_024, **lowerCAmelCase, ):
"""simple docstring"""
lowerCamelCase_ =vocab_size
lowerCamelCase_ =d_model
lowerCamelCase_ =decoder_ffn_dim
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =decoder_attention_heads
lowerCamelCase_ =dropout
lowerCamelCase_ =attention_dropout
lowerCamelCase_ =activation_dropout
lowerCamelCase_ =activation_function
lowerCamelCase_ =init_std
lowerCamelCase_ =decoder_layerdrop
lowerCamelCase_ =use_cache
lowerCamelCase_ =decoder_layers
lowerCamelCase_ =scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase_ =max_target_positions
super().__init__(
pad_token_id=lowerCAmelCase, bos_token_id=lowerCAmelCase, eos_token_id=lowerCAmelCase, decoder_start_token_id=lowerCAmelCase, **lowerCAmelCase, )
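# With the class exported under its real name (Speech2Text2Config in
# transformers), the defaults above give a 256-dim, 6-layer decoder:
#   config = Speech2Text2Config()
#   config.d_model, config.decoder_layers   # (256, 6)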
| 75 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
a_ : List[Any] = logging.get_logger(__name__)
a_ : Tuple = OrderedDict(
[
("""audio-spectrogram-transformer""", """ASTFeatureExtractor"""),
("""beit""", """BeitFeatureExtractor"""),
("""chinese_clip""", """ChineseCLIPFeatureExtractor"""),
("""clap""", """ClapFeatureExtractor"""),
("""clip""", """CLIPFeatureExtractor"""),
("""clipseg""", """ViTFeatureExtractor"""),
("""conditional_detr""", """ConditionalDetrFeatureExtractor"""),
("""convnext""", """ConvNextFeatureExtractor"""),
("""cvt""", """ConvNextFeatureExtractor"""),
("""data2vec-audio""", """Wav2Vec2FeatureExtractor"""),
("""data2vec-vision""", """BeitFeatureExtractor"""),
("""deformable_detr""", """DeformableDetrFeatureExtractor"""),
("""deit""", """DeiTFeatureExtractor"""),
("""detr""", """DetrFeatureExtractor"""),
("""dinat""", """ViTFeatureExtractor"""),
("""donut-swin""", """DonutFeatureExtractor"""),
("""dpt""", """DPTFeatureExtractor"""),
("""encodec""", """EncodecFeatureExtractor"""),
("""flava""", """FlavaFeatureExtractor"""),
("""glpn""", """GLPNFeatureExtractor"""),
("""groupvit""", """CLIPFeatureExtractor"""),
("""hubert""", """Wav2Vec2FeatureExtractor"""),
("""imagegpt""", """ImageGPTFeatureExtractor"""),
("""layoutlmv2""", """LayoutLMv2FeatureExtractor"""),
("""layoutlmv3""", """LayoutLMv3FeatureExtractor"""),
("""levit""", """LevitFeatureExtractor"""),
("""maskformer""", """MaskFormerFeatureExtractor"""),
("""mctct""", """MCTCTFeatureExtractor"""),
("""mobilenet_v1""", """MobileNetV1FeatureExtractor"""),
("""mobilenet_v2""", """MobileNetV2FeatureExtractor"""),
("""mobilevit""", """MobileViTFeatureExtractor"""),
("""nat""", """ViTFeatureExtractor"""),
("""owlvit""", """OwlViTFeatureExtractor"""),
("""perceiver""", """PerceiverFeatureExtractor"""),
("""poolformer""", """PoolFormerFeatureExtractor"""),
("""regnet""", """ConvNextFeatureExtractor"""),
("""resnet""", """ConvNextFeatureExtractor"""),
("""segformer""", """SegformerFeatureExtractor"""),
("""sew""", """Wav2Vec2FeatureExtractor"""),
("""sew-d""", """Wav2Vec2FeatureExtractor"""),
("""speech_to_text""", """Speech2TextFeatureExtractor"""),
("""speecht5""", """SpeechT5FeatureExtractor"""),
("""swiftformer""", """ViTFeatureExtractor"""),
("""swin""", """ViTFeatureExtractor"""),
("""swinv2""", """ViTFeatureExtractor"""),
("""table-transformer""", """DetrFeatureExtractor"""),
("""timesformer""", """VideoMAEFeatureExtractor"""),
("""tvlt""", """TvltFeatureExtractor"""),
("""unispeech""", """Wav2Vec2FeatureExtractor"""),
("""unispeech-sat""", """Wav2Vec2FeatureExtractor"""),
("""van""", """ConvNextFeatureExtractor"""),
("""videomae""", """VideoMAEFeatureExtractor"""),
("""vilt""", """ViltFeatureExtractor"""),
("""vit""", """ViTFeatureExtractor"""),
("""vit_mae""", """ViTFeatureExtractor"""),
("""vit_msn""", """ViTFeatureExtractor"""),
("""wav2vec2""", """Wav2Vec2FeatureExtractor"""),
("""wav2vec2-conformer""", """Wav2Vec2FeatureExtractor"""),
("""wavlm""", """Wav2Vec2FeatureExtractor"""),
("""whisper""", """WhisperFeatureExtractor"""),
("""xclip""", """CLIPFeatureExtractor"""),
("""yolos""", """YolosFeatureExtractor"""),
]
)
a_ : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def a_ ( __snake_case : str ) -> Any:
"""simple docstring"""
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ =model_type_to_module_name(__snake_case )
lowerCamelCase_ =importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(__snake_case , __snake_case )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__snake_case , '''__name__''' , __snake_case ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ =importlib.import_module('''transformers''' )
if hasattr(__snake_case , __snake_case ):
return getattr(__snake_case , __snake_case )
return None
def a_ ( __snake_case : Union[str, os.PathLike] , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Union[str, Any] , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ =get_file_from_repo(
__snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(__snake_case , encoding='''utf-8''' ) as reader:
return json.load(__snake_case )
class __UpperCamelCase :
def __init__( self ):
"""simple docstring"""
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(lowerCAmelCase )
def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =kwargs.pop('''config''', lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''trust_remote_code''', lowerCAmelCase )
lowerCamelCase_ =True
lowerCamelCase_, lowerCamelCase_ =FeatureExtractionMixin.get_feature_extractor_dict(lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =config_dict.get('''feature_extractor_type''', lowerCAmelCase )
lowerCamelCase_ =None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''', {} ):
lowerCamelCase_ =config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(lowerCAmelCase, lowerCAmelCase ):
lowerCamelCase_ =AutoConfig.from_pretrained(lowerCAmelCase, **lowerCAmelCase )
# It could be in `config.feature_extractor_type`
lowerCamelCase_ =getattr(lowerCAmelCase, '''feature_extractor_type''', lowerCAmelCase )
if hasattr(lowerCAmelCase, '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase_ =config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
lowerCamelCase_ =feature_extractor_class_from_name(lowerCAmelCase )
lowerCamelCase_ =feature_extractor_auto_map is not None
lowerCamelCase_ =feature_extractor_class is not None or type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase_ =resolve_trust_remote_code(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase )
if has_remote_code and trust_remote_code:
lowerCamelCase_ =get_class_from_dynamic_module(
lowerCAmelCase, lowerCAmelCase, **lowerCAmelCase )
lowerCamelCase_ =kwargs.pop('''code_revision''', lowerCAmelCase )
if os.path.isdir(lowerCAmelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(lowerCAmelCase ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase_ =FEATURE_EXTRACTOR_MAPPING[type(lowerCAmelCase )]
return feature_extractor_class.from_dict(lowerCAmelCase, **lowerCAmelCase )
raise ValueError(
f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowercase__ ( lowerCAmelCase, lowerCAmelCase ):
"""simple docstring"""
FEATURE_EXTRACTOR_MAPPING.register(lowerCAmelCase, lowerCAmelCase )
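# A minimal sketch of the lazy name-to-class resolution performed above,
# assuming a generic module path; transformers resolves feature extractor
# classes the same way, via importlib plus getattr. `class_from_name` is an
# illustrative helper, not part of the file.
import importlib

def class_from_name(module_path: str, class_name: str):
    # Import the module only when the class is first requested.
    module = importlib.import_module(module_path)
    # Return None instead of raising when the attribute is absent.
    return getattr(module, class_name, None)

# Usage: class_from_name("collections", "OrderedDict") is collections.OrderedDict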
| 75 | 1 |
def euclidean_gcd(a: int, b: int) -> int:
    """simple docstring"""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """simple docstring"""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    """simple docstring"""
    print(F"""euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}""")
    print(F"""euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}""")
    print(F"""euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}""")
    print(F"""euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}""")
    print(F"""euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}""")
    print(F"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}""")
    print(F"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}""")
    print(F"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}""")
    print(F"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}""")
    print(F"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}""")


if __name__ == "__main__":
    main()
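# A quick sanity check of both implementations above against the standard
# library; math.gcd is the reference and the inputs are arbitrary non-negative ints.
import math
import random

for _ in range(100):
    x, y = random.randint(0, 10**6), random.randint(0, 10**6)
    assert euclidean_gcd(x, y) == euclidean_gcd_recursive(x, y) == math.gcd(x, y)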
| 366 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( _a):
'''simple docstring'''
__UpperCamelCase : List[Any] = "ibert"
def __init__( self , __SCREAMING_SNAKE_CASE=30_522 , __SCREAMING_SNAKE_CASE=768 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=12 , __SCREAMING_SNAKE_CASE=3_072 , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-12 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=2 , __SCREAMING_SNAKE_CASE="absolute" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="none" , **__SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = vocab_size
UpperCamelCase : Optional[int] = hidden_size
UpperCamelCase : Tuple = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Dict = hidden_act
UpperCamelCase : Union[str, Any] = intermediate_size
UpperCamelCase : str = hidden_dropout_prob
UpperCamelCase : Any = attention_probs_dropout_prob
UpperCamelCase : Dict = max_position_embeddings
UpperCamelCase : Union[str, Any] = type_vocab_size
UpperCamelCase : Optional[Any] = initializer_range
UpperCamelCase : Union[str, Any] = layer_norm_eps
UpperCamelCase : Dict = position_embedding_type
UpperCamelCase : int = quant_mode
UpperCamelCase : Any = force_dequant
class UpperCAmelCase_ ( _a):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
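# Where the dynamic-axes OrderedDict above typically ends up: torch.onnx.export
# accepts such a mapping as `dynamic_axes`, keeping the batch and sequence dims
# symbolic in the exported graph. A hedged sketch; the model, tensors, and
# output filename are placeholders.
dynamic_axis = {0: "batch", 1: "sequence"}
onnx_inputs = {"input_ids": dynamic_axis, "attention_mask": dynamic_axis}
# torch.onnx.export(model, (input_ids, attention_mask), "ibert.onnx",
#                   input_names=list(onnx_inputs), dynamic_axes=onnx_inputs)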
| 315 | 0 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _lowerCamelCase( a ):
return getitem, k
def _lowerCamelCase( a , a ):
return setitem, k, v
def _lowerCamelCase( a ):
return delitem, k
def _lowerCamelCase( a , a , *a ):
try:
return fun(a , *a ), None
except Exception as e:
return None, e
SCREAMING_SNAKE_CASE__:List[Any] = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
SCREAMING_SNAKE_CASE__:List[Any] = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
SCREAMING_SNAKE_CASE__:Any = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
SCREAMING_SNAKE_CASE__:int = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
SCREAMING_SNAKE_CASE__:Any = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def _lowerCamelCase( a ):
__a = HashMap(initial_block_size=4 )
__a = {}
for _, (fun, *args) in enumerate(a ):
__a , __a = _run_operation(a , a , *a )
__a , __a = _run_operation(a , a , *a )
assert my_res == py_res
assert str(a ) == str(a )
assert set(a ) == set(a )
assert len(a ) == len(a )
assert set(my.items() ) == set(py.items() )
def _lowerCamelCase( ):
def is_public(a ) -> bool:
return not name.startswith("_" )
__a = {name for name in dir({} ) if is_public(a )}
__a = {name for name in dir(HashMap() ) if is_public(a )}
assert dict_public_names > hash_public_names
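# The differential-testing idea above, distilled: replay one operation log
# against the built-in dict (the oracle) and the implementation under test,
# and require the observable results to match. A minimal sketch reusing
# _run_operation; `check_same_behaviour` and `make_impl` are illustrative.
def check_same_behaviour(operations, make_impl):
    reference, candidate = {}, make_impl()
    for fun, *args in operations:
        ref_res, _ = _run_operation(reference, fun, *args)
        cand_res, _ = _run_operation(candidate, fun, *args)
        assert cand_res == ref_res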
| 261 | """simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class snake_case__ :
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = use_labels
__a = scope
def a__ ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = self.get_config()
return config, input_ids, input_mask, token_labels
def a__ ( self ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , )
def a__ ( self ):
__a , __a , __a , __a = self.prepare_config_and_inputs()
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase )
__a = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = BertGenerationEncoder(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ):
__a = True
__a = True
__a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval()
# first forward pass
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , )
__a = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
__a = model(
lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) )
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ):
__a = BertGenerationDecoder(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ):
__a , __a , __a , __a = self.prepare_config_and_inputs()
__a = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ):
_snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else ()
_snake_case : Union[str, Any] = (
{"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder}
if is_torch_available()
else {}
)
def a__ ( self ):
__a = BertGenerationEncoderTester(self )
__a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def a__ ( self ):
self.config_tester.run_common_tests()
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def a__ ( self ):
__a , __a , __a , __a = self.model_tester.prepare_config_and_inputs()
__a = "bert"
self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase )
def a__ ( self ):
# This regression test was failing with PyTorch < 1.3
__a , __a , __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs_for_decoder()
__a = None
self.model_tester.create_and_check_model_as_decoder(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , )
def a__ ( self ):
__a = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase )
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class snake_case__ ( unittest.TestCase ):
@slow
def a__ ( self ):
__a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
__a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] )
with torch.no_grad():
__a = model(lowerCamelCase )[0]
__a = torch.Size([1, 8, 50358] )
self.assertEqual(output.shape , lowerCamelCase )
__a = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
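# The helpers used by the tester above (ids_tensor, random_attention_mask)
# boil down to torch.randint; a rough sketch of equivalent builders, with the
# usual guarantee that at least one position per row is attended to. The
# function names here are illustrative.
import torch

def make_ids(batch_size: int, seq_length: int, vocab_size: int) -> torch.Tensor:
    return torch.randint(0, vocab_size, (batch_size, seq_length), dtype=torch.long)

def make_attention_mask(batch_size: int, seq_length: int) -> torch.Tensor:
    mask = torch.randint(0, 2, (batch_size, seq_length), dtype=torch.long)
    mask[:, -1] = 1  # ensure every row attends to at least one token
    return mask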
| 261 | 1 |
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
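# Cross-check: every subsequence is a combination of some length, so the
# backtracking output above should match this itertools reference up to ordering.
from itertools import combinations

def all_subsequences(sequence):
    return [list(c) for r in range(len(sequence) + 1) for c in combinations(sequence, r)]

# all_subsequences(["A", "B", "C"]) yields [], ['A'], ['B'], ['C'],
# ['A', 'B'], ['A', 'C'], ['B', 'C'], ['A', 'B', 'C']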
| 14 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase_ = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase ( a_ ) -> List[str]:
if isinstance(a_ , torch.Tensor ):
return image
elif isinstance(a_ , PIL.Image.Image ):
lowerCAmelCase_ = [image]
lowerCAmelCase_ = [trans(img.convert('RGB' ) ) for img in image]
lowerCAmelCase_ = torch.stack(a_ )
return image
class a_ ( a_ ):
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_ ) -> str:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCAmelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
def _lowercase ( self , lowercase_ ) -> Optional[Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f'''The value of strength should be in [0.0, 1.0] but is {strength}''' )
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ = min(int(num_inference_steps * strength ) , lowercase_ )
lowerCAmelCase_ = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_=None ) -> Tuple:
'''simple docstring'''
if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}''' )
lowerCAmelCase_ = image.to(device=lowercase_ , dtype=lowercase_ )
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
lowerCAmelCase_ = init_latents.shape
lowerCAmelCase_ = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
# get latents
print('add noise to latents at timestep' , lowercase_ )
lowerCAmelCase_ = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
lowerCAmelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self , lowercase_ = None , lowercase_ = 0.8 , lowercase_ = 1 , lowercase_ = None , lowercase_ = 0.0 , lowercase_ = 5_0 , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowercase_ )
# 2. Preprocess image
lowerCAmelCase_ = preprocess(lowercase_ )
# 3. set timesteps
self.scheduler.set_timesteps(lowercase_ , device=self.device )
lowerCAmelCase_ , lowerCAmelCase_ = self.get_timesteps(lowercase_ , lowercase_ , self.device )
lowerCAmelCase_ = timesteps[:1].repeat(lowercase_ )
# 4. Prepare latent variables
lowerCAmelCase_ = self.prepare_latents(lowercase_ , lowercase_ , lowercase_ , self.unet.dtype , self.device , lowercase_ )
lowerCAmelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(lowercase_ ):
# 1. predict noise model_output
lowerCAmelCase_ = self.unet(lowercase_ , lowercase_ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCAmelCase_ = self.scheduler.step(
lowercase_ , lowercase_ , lowercase_ , eta=lowercase_ , use_clipped_model_output=lowercase_ , generator=lowercase_ , ).prev_sample
lowerCAmelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase_ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowercase_ )
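# The strength-to-timestep arithmetic in get_timesteps above, worked through:
# with 50 inference steps and strength 0.8, denoising starts 40 steps from the
# end of the schedule, so only the last 40 scheduler timesteps are run.
num_inference_steps, strength = 50, 0.8
init_timestep = min(int(num_inference_steps * strength), num_inference_steps)  # 40
t_start = max(num_inference_steps - init_timestep, 0)                          # 10
# timesteps = scheduler.timesteps[t_start:]  -> the 40 timesteps actually denoised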
| 14 | 1 |