| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _lowercase :
'''simple docstring'''
def __init__( self , snake_case__ , snake_case__=3 , snake_case__=32 , snake_case__=3 , snake_case__=10 , snake_case__=[10, 20, 30, 40] , snake_case__=[1, 1, 2, 1] , snake_case__=True , snake_case__=True , snake_case__="relu" , snake_case__=3 , snake_case__=None , ):
'''simple docstring'''
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = image_size
UpperCamelCase_ = num_channels
UpperCamelCase_ = embeddings_size
UpperCamelCase_ = hidden_sizes
UpperCamelCase_ = depths
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = hidden_act
UpperCamelCase_ = num_labels
UpperCamelCase_ = scope
UpperCamelCase_ = len(lowerCamelCase__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase_ = None
if self.use_labels:
UpperCamelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCamelCase_ = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self ):
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = RegNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase_ = model(lowerCamelCase__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = self.num_labels
UpperCamelCase_ = RegNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
UpperCamelCase_ = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self ):
'''simple docstring'''
config, pixel_values, labels = self.prepare_config_and_inputs()
inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowercase (a_ , a_ , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = RegNetModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _lowerCamelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _lowerCamelCase ( self ):
'''simple docstring'''
pass
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(lowerCamelCase__ )
UpperCamelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase_ = [*signature.parameters.keys()]
UpperCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase_ = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def _lowerCamelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ):
UpperCamelCase_ = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase_ = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
UpperCamelCase_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase_ = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase_ = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase_ = layer_type
UpperCamelCase_ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase_ = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase_ = RegNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _lowerCAmelCase ():
UpperCamelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_torch
@require_vision
class _lowercase (unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowerCamelCase ( self ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
UpperCamelCase_ = self.default_image_processor
UpperCamelCase_ = prepare_img()
UpperCamelCase_ = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
UpperCamelCase_ = model(**lowerCamelCase__ )
# verify the logits
UpperCamelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
UpperCamelCase_ = torch.tensor([-0.4_180, -1.5_051, -3.4_836] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 128 |
'''simple docstring'''
def sum_of_digits(n):
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n):
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n):
    return sum(int(c) for c in str(abs(n)))


def benchmark():
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
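# Illustrative check (added; not part of the original row): all three
# implementations agree, including on negatives, e.g.
# sum_of_digits(-123) == 1 + 2 + 3 == 6.
assert sum_of_digits(-123) == sum_of_digits_recursion(-123) == sum_of_digits_compact(-123) == 6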
| 83 | 0 |
class CircularQueue:
    """Circular FIFO queue with a fixed capacity of n elements."""

    def __init__(self, n):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self):
        return self.size

    def is_empty(self):
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
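# Illustrative usage sketch (added): the modular arithmetic on `rear` and
# `front` lets the fixed-size buffer be reused once elements are dequeued.
q = CircularQueue(2)
q.enqueue('a').enqueue('b')
assert q.dequeue() == 'a' and len(q) == 1
q.enqueue('c')  # reuses the slot freed by the dequeue above
assert q.dequeue() == 'b' and q.dequeue() == 'c' and q.is_empty()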
| 101 |
'''simple docstring'''
from math import pi
def arc_length(angle, radius):
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
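# Worked check (added): a 90-degree arc on a radius-10 circle is a quarter
# of the circumference, 2 * pi * 10 / 4 = 5 * pi ≈ 15.70796.
assert abs(arc_length(90, 10) - 15.70796) < 1e-4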
| 83 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = process
SCREAMING_SNAKE_CASE : Optional[Any] = params
def __len__( self ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dataset[i]
SCREAMING_SNAKE_CASE : Dict = self.process(lowerCamelCase__, **self.params )
return processed
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A, A, A=None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = loader
SCREAMING_SNAKE_CASE : Tuple = infer
SCREAMING_SNAKE_CASE : List[str] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
SCREAMING_SNAKE_CASE : Any = None
SCREAMING_SNAKE_CASE : Union[str, Any] = loader_batch_size
# Internal bookkeeping
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : str = None
def __len__( self ):
'''simple docstring'''
return len(self.loader )
def __iter__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = iter(self.loader )
return self
def UpperCamelCase_ ( self ):
'''simple docstring'''
if isinstance(self._loader_batch_data, torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
SCREAMING_SNAKE_CASE : Union[str, Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
# Convert ModelOutput to tuple first
SCREAMING_SNAKE_CASE : str = element.to_tuple()
if isinstance(element[0], torch.Tensor ):
SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0], np.ndarray ):
SCREAMING_SNAKE_CASE : str = tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase__, lowerCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0], torch.Tensor ):
SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0], np.ndarray ):
SCREAMING_SNAKE_CASE : Tuple = tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
SCREAMING_SNAKE_CASE : Optional[int] = None
elif isinstance(element[self._loader_batch_index], torch.Tensor ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : int = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index], np.ndarray ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
SCREAMING_SNAKE_CASE : Optional[Any] = np.expand_dims(element[self._loader_batch_index], 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
SCREAMING_SNAKE_CASE : Optional[int] = self._loader_batch_data.__class__(lowerCamelCase__ )
self._loader_batch_index += 1
return result
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
SCREAMING_SNAKE_CASE : List[str] = self.infer(lowerCamelCase__, **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCamelCase__, torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = processed
else:
SCREAMING_SNAKE_CASE : List[Any] = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : Optional[int] = processed[key]
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
SCREAMING_SNAKE_CASE : int = len(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : int = observed_batch_size
# Setting internal index to unwrap the batch
SCREAMING_SNAKE_CASE : Dict = processed
SCREAMING_SNAKE_CASE : str = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A, A, A=None ):
'''simple docstring'''
super().__init__(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ )
def __iter__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = iter(self.loader )
SCREAMING_SNAKE_CASE : List[str] = None
return self
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.subiterator is None:
SCREAMING_SNAKE_CASE : Tuple = self.infer(next(self.iterator ), **self.params )
try:
# Try to return next item
SCREAMING_SNAKE_CASE : Optional[Any] = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item.
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
SCREAMING_SNAKE_CASE : List[Any] = self.infer(next(self.iterator ), **self.params )
SCREAMING_SNAKE_CASE : int = next(self.subiterator )
return processed
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __iter__( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def UpperCamelCase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = False
SCREAMING_SNAKE_CASE : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : Dict = self.loader_batch_item()
SCREAMING_SNAKE_CASE : List[str] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
if is_last:
return accumulator
while not is_last:
SCREAMING_SNAKE_CASE : List[Any] = self.infer(next(self.iterator ), **self.params )
if self.loader_batch_size is not None:
if isinstance(lowerCamelCase__, torch.Tensor ):
SCREAMING_SNAKE_CASE : str = processed
else:
SCREAMING_SNAKE_CASE : Any = list(processed.keys() )[0]
SCREAMING_SNAKE_CASE : Tuple = processed[key]
if isinstance(lowerCamelCase__, lowerCamelCase__ ):
SCREAMING_SNAKE_CASE : Dict = len(lowerCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Tuple = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
SCREAMING_SNAKE_CASE : Any = observed_batch_size
SCREAMING_SNAKE_CASE : List[Any] = processed
SCREAMING_SNAKE_CASE : int = 0
while self._loader_batch_index < self.loader_batch_size:
SCREAMING_SNAKE_CASE : List[Any] = self.loader_batch_item()
SCREAMING_SNAKE_CASE : Optional[Any] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
if is_last:
return accumulator
else:
SCREAMING_SNAKE_CASE : Any = processed
SCREAMING_SNAKE_CASE : List[Any] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
return accumulator
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = dataset
SCREAMING_SNAKE_CASE : str = key
def __len__( self ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self, A ):
'''simple docstring'''
return self.dataset[i][self.key]
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A, A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = dataset
SCREAMING_SNAKE_CASE : Optional[Any] = keya
SCREAMING_SNAKE_CASE : str = keya
def __len__( self ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self, A ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 251 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : int = logging.get_logger(__name__)
snake_case_ : Optional[Any] = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class lowercase__ ( lowercase ):
lowercase__ = """mvp"""
lowercase__ = ["""past_key_values"""]
lowercase__ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[Any] ,lowerCamelCase__ : Any=50267 ,lowerCamelCase__ : Optional[int]=1024 ,lowerCamelCase__ : int=12 ,lowerCamelCase__ : Tuple=4096 ,lowerCamelCase__ : Union[str, Any]=16 ,lowerCamelCase__ : List[Any]=12 ,lowerCamelCase__ : Tuple=4096 ,lowerCamelCase__ : Any=16 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : Optional[int]=0.0 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : Optional[int]=1024 ,lowerCamelCase__ : Tuple=0.1 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Union[str, Any]=0.0 ,lowerCamelCase__ : Union[str, Any]=0.0_2 ,lowerCamelCase__ : Union[str, Any]=0.0 ,lowerCamelCase__ : Tuple=False ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : str=1 ,lowerCamelCase__ : Any=0 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Dict=2 ,lowerCamelCase__ : Optional[int]=2 ,lowerCamelCase__ : Optional[int]=False ,lowerCamelCase__ : Tuple=100 ,lowerCamelCase__ : Optional[int]=800 ,**lowerCamelCase__ : int ,):
'''simple docstring'''
_UpperCamelCase : Optional[int] = vocab_size
_UpperCamelCase : Union[str, Any] = max_position_embeddings
_UpperCamelCase : Dict = d_model
_UpperCamelCase : Any = encoder_ffn_dim
_UpperCamelCase : Dict = encoder_layers
_UpperCamelCase : Optional[Any] = encoder_attention_heads
_UpperCamelCase : Optional[int] = decoder_ffn_dim
_UpperCamelCase : str = decoder_layers
_UpperCamelCase : int = decoder_attention_heads
_UpperCamelCase : str = dropout
_UpperCamelCase : str = attention_dropout
_UpperCamelCase : List[Any] = activation_dropout
_UpperCamelCase : Dict = activation_function
_UpperCamelCase : List[str] = init_std
_UpperCamelCase : Dict = encoder_layerdrop
_UpperCamelCase : Tuple = decoder_layerdrop
_UpperCamelCase : Optional[int] = classifier_dropout
_UpperCamelCase : str = use_cache
_UpperCamelCase : Union[str, Any] = encoder_layers
_UpperCamelCase : Any = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCamelCase : Any = use_prompt
_UpperCamelCase : Optional[int] = prompt_length
_UpperCamelCase : Any = prompt_mid_dim
super().__init__(
pad_token_id=lowerCamelCase__ ,bos_token_id=lowerCamelCase__ ,eos_token_id=lowerCamelCase__ ,is_encoder_decoder=lowerCamelCase__ ,decoder_start_token_id=lowerCamelCase__ ,forced_eos_token_id=lowerCamelCase__ ,**lowerCamelCase__ ,)
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.' )
| 83 | 0 |
"""simple docstring"""
import math
class SelfOrganizingMap:
    """A minimal self-organizing map with two competing weight vectors."""

    def get_winner(self, weights, sample):
        """Index of the weight vector nearest to the sample (squared Euclidean distance)."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # the nearer weight vector wins
        return 0 if d0 < d1 else 1

    def update(self, weights, sample, j, alpha):
        """Move the winning weight vector `j` towards the sample by learning rate `alpha`."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main():
    # training samples (m, n)
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f"""Clusters that the test sample belongs to : {winner}""")
    print(f"""Weights that have been trained : {weights}""")


# running the main() function
if __name__ == "__main__":
    main()
| 108 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class lowercase__ ( lowercase ):
lowercase__ = """openai/whisper-base"""
lowercase__ = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
lowercase__ = """transcriber"""
lowercase__ = WhisperProcessor
lowercase__ = WhisperForConditionalGeneration
lowercase__ = ["""audio"""]
lowercase__ = ["""text"""]
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
return self.pre_processor(lowerCamelCase__ ,return_tensors='pt' ).input_features
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
return self.model.generate(inputs=lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Union[str, Any] ):
'''simple docstring'''
return self.pre_processor.batch_decode(lowerCamelCase__ ,skip_special_tokens=lowerCamelCase__ )[0]
| 83 | 0 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 120 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
snake_case_ : str = logging.getLogger(__name__)
def A__ ( ):
_UpperCamelCase : List[Any] = argparse.ArgumentParser(
description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
parser.add_argument('--file_path' , type=UpperCAmelCase_ , default='data/dump.txt' , help='The path to the data.' )
parser.add_argument('--tokenizer_type' , type=UpperCAmelCase_ , default='bert' , choices=['bert', 'roberta', 'gpt2'] )
parser.add_argument('--tokenizer_name' , type=UpperCAmelCase_ , default='bert-base-uncased' , help='The tokenizer to use.' )
parser.add_argument('--dump_file' , type=UpperCAmelCase_ , default='data/dump' , help='The dump file prefix.' )
_UpperCamelCase : Any = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
_UpperCamelCase : Optional[int] = BertTokenizer.from_pretrained(args.tokenizer_name )
_UpperCamelCase : Optional[int] = tokenizer.special_tokens_map['cls_token'] # `[CLS]`
_UpperCamelCase : Dict = tokenizer.special_tokens_map['sep_token'] # `[SEP]`
elif args.tokenizer_type == "roberta":
_UpperCamelCase : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCamelCase : Any = tokenizer.special_tokens_map['cls_token'] # `<s>`
_UpperCamelCase : int = tokenizer.special_tokens_map['sep_token'] # `</s>`
elif args.tokenizer_type == "gpt2":
_UpperCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
_UpperCamelCase : Optional[Any] = tokenizer.special_tokens_map['bos_token'] # `<|endoftext|>`
_UpperCamelCase : Any = tokenizer.special_tokens_map['eos_token'] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path , 'r' , encoding='utf8' ) as fp:
_UpperCamelCase : List[Any] = fp.readlines()
logger.info('Start encoding' )
logger.info(f'{len(UpperCAmelCase_ )} examples to process.' )
_UpperCamelCase : int = []
_UpperCamelCase : Any = 0
_UpperCamelCase : Any = 1_0_0_0_0
_UpperCamelCase : Optional[Any] = time.time()
for text in data:
_UpperCamelCase : List[Any] = f'{bos} {text.strip()} {sep}'
_UpperCamelCase : Any = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
rslt.append(UpperCAmelCase_ )
iter += 1
if iter % interval == 0:
_UpperCamelCase : Union[str, Any] = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
_UpperCamelCase : Tuple = time.time()
logger.info('Finished binarization' )
logger.info(f'{len(UpperCAmelCase_ )} examples processed.' )
_UpperCamelCase : Optional[int] = f'{args.dump_file}.{args.tokenizer_name}.pickle'
_UpperCamelCase : List[str] = tokenizer.vocab_size
if vocab_size < (1 << 16):
    rslt_ = [np.uint16(d) for d in rslt]
else:
    rslt_ = [np.int32(d) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(UpperCAmelCase_ , 'wb' ) as handle:
pickle.dump(rslt_ , UpperCAmelCase_ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 83 | 0 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    return sum(
        number
        for number in range(1000, 1000000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
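# Worked check (added): 4150 is one of the Project Euler 30 numbers, since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150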
| 72 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
snake_case_ : List[Any] = None
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : Dict = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
snake_case_ : List[Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
snake_case_ : List[str] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
snake_case_ : List[str] = '▁'
class lowercase__ ( lowercase ):
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = AlbertTokenizer
def __init__( self : Tuple ,lowerCamelCase__ : Optional[int]=None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : int=True ,lowerCamelCase__ : Any=False ,lowerCamelCase__ : Optional[int]="[CLS]" ,lowerCamelCase__ : Union[str, Any]="[SEP]" ,lowerCamelCase__ : Optional[int]="<unk>" ,lowerCamelCase__ : str="[SEP]" ,lowerCamelCase__ : List[Any]="<pad>" ,lowerCamelCase__ : Dict="[CLS]" ,lowerCamelCase__ : int="[MASK]" ,**lowerCamelCase__ : Any ,):
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
_UpperCamelCase : Dict = (
AddedToken(lowerCamelCase__ ,lstrip=lowerCamelCase__ ,rstrip=lowerCamelCase__ ,normalized=lowerCamelCase__ )
if isinstance(lowerCamelCase__ ,lowerCamelCase__ )
else mask_token
)
super().__init__(
lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,do_lower_case=lowerCamelCase__ ,remove_space=lowerCamelCase__ ,keep_accents=lowerCamelCase__ ,bos_token=lowerCamelCase__ ,eos_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,**lowerCamelCase__ ,)
_UpperCamelCase : Tuple = do_lower_case
_UpperCamelCase : str = remove_space
_UpperCamelCase : Optional[Any] = keep_accents
_UpperCamelCase : Dict = vocab_file
_UpperCamelCase : Dict = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
_UpperCamelCase : List[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
_UpperCamelCase : int = [self.sep_token_id]
_UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCamelCase : Dict = os.path.join(
lowerCamelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file ,lowerCamelCase__ )
return (out_vocab_file,)
| 83 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A ( UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase__ : List[Any] =MgpstrTokenizer
UpperCamelCase__ : Any =False
UpperCamelCase__ : Any ={}
UpperCamelCase__ : Dict =False
def lowerCamelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
super().setUp()
# fmt: off
_lowerCamelCase : Optional[Any] =['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
_lowerCamelCase : str =dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_lowerCamelCase : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '\n' )
def lowerCamelCase ( self : int , **lowercase_ : str ) -> Tuple:
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCamelCase ( self : str , lowercase_ : Any ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Any ='tester'
_lowerCamelCase : Optional[int] ='tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def lowerCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
def lowerCamelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
_lowerCamelCase : List[Any] =self.get_tokenizers(do_lower_case=lowerCamelCase__ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowerCamelCase : Union[str, Any] ='[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
_lowerCamelCase : List[Any] =tokenizer.encode([special_token] , add_special_tokens=lowerCamelCase__ )
self.assertEqual(len(lowerCamelCase__ ) , 1 )
_lowerCamelCase : Any =tokenizer.decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ )
self.assertTrue(special_token not in decoded )
def lowerCamelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : Optional[int] =self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_lowerCamelCase : List[Any] =self.get_input_output_texts(lowerCamelCase__ )
_lowerCamelCase : List[str] =tokenizer.tokenize(lowerCamelCase__ )
_lowerCamelCase : str =tokenizer.convert_tokens_to_ids(lowerCamelCase__ )
_lowerCamelCase : List[str] =tokenizer.encode(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase : Optional[Any] =tokenizer.convert_ids_to_tokens(lowerCamelCase__ )
self.assertNotEqual(len(lowerCamelCase__ ) , 0 )
_lowerCamelCase : Dict =tokenizer.decode(lowerCamelCase__ )
self.assertIsInstance(lowerCamelCase__ , lowerCamelCase__ )
self.assertEqual(text_a.replace(' ' , '' ) , lowerCamelCase__ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def lowerCamelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def lowerCamelCase ( self : Any ) -> Dict:
"""simple docstring"""
pass
| 199 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowercase__ ( lowercase ):
def __init__( self : Any ,lowerCamelCase__ : str ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
_UpperCamelCase : str = dataset
_UpperCamelCase : Optional[Any] = process
_UpperCamelCase : Optional[Any] = params
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Tuple ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.dataset[i]
_UpperCamelCase : Dict = self.process(lowerCamelCase__ ,**self.params )
return processed
class lowercase__ ( lowercase ):
def __init__( self : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : Optional[int]=None ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = loader
_UpperCamelCase : Tuple = infer
_UpperCamelCase : List[str] = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCamelCase : Any = None
_UpperCamelCase : Union[str, Any] = loader_batch_size
# Internal bookkeeping
_UpperCamelCase : Optional[Any] = None
_UpperCamelCase : str = None
def __len__( self : List[str] ):
'''simple docstring'''
return len(self.loader )
def __iter__( self : int ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = iter(self.loader )
return self
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
if isinstance(self._loader_batch_data ,torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_UpperCamelCase : Union[str, Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCamelCase : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
# Convert ModelOutput to tuple first
_UpperCamelCase : str = element.to_tuple()
if isinstance(element[0] ,torch.Tensor ):
_UpperCamelCase : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_UpperCamelCase : str = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] ,torch.Tensor ):
_UpperCamelCase : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_UpperCamelCase : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCamelCase : Optional[int] = None
elif isinstance(element[self._loader_batch_index] ,torch.Tensor ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase : int = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] ,np.ndarray ):
# Take the correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase : Optional[Any] = np.expand_dims(element[self._loader_batch_index] ,0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCamelCase : Union[str, Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCamelCase : Optional[int] = self._loader_batch_data.__class__(lowerCamelCase__ )
self._loader_batch_index += 1
return result
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCamelCase : Tuple = next(self.iterator )
_UpperCamelCase : List[str] = self.infer(lowerCamelCase__ ,**self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCamelCase__ ,torch.Tensor ):
_UpperCamelCase : List[Any] = processed
else:
_UpperCamelCase : List[Any] = list(processed.keys() )[0]
_UpperCamelCase : Optional[int] = processed[key]
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : int = len(lowerCamelCase__ )
else:
_UpperCamelCase : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase : int = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCamelCase : Dict = processed
_UpperCamelCase : str = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowercase__ ( lowercase ):
def __init__( self : str ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Dict ,lowerCamelCase__ : Any=None ):
'''simple docstring'''
super().__init__(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
def __iter__( self : Dict ):
'''simple docstring'''
_UpperCamelCase : str = iter(self.loader )
_UpperCamelCase : List[str] = None
return self
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
if self.subiterator is None:
_UpperCamelCase : Tuple = self.infer(next(self.iterator ) ,**self.params )
try:
# Try to return next item
_UpperCamelCase : Optional[Any] = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item.
# ChunkIterator will keep feeding until ALL elements of the iterator
# have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_UpperCamelCase : List[Any] = self.infer(next(self.iterator ) ,**self.params )
_UpperCamelCase : int = next(self.subiterator )
return processed
class lowercase__ ( lowercase ):
def __iter__( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Dict = iter(self.loader )
return self
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# Extremely similar to PipelineIterator in its unpacking mechanism,
# BUT we have an extra required item, which is the presence of `is_last`.
# Because everything is flattened by `PipelineChunkIterator`, we need
# to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes it on to the caller.
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase : Dict = self.loader_batch_item()
_UpperCamelCase : List[str] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
if is_last:
return accumulator
while not is_last:
_UpperCamelCase : List[Any] = self.infer(next(self.iterator ) ,**self.params )
if self.loader_batch_size is not None:
if isinstance(lowerCamelCase__ ,torch.Tensor ):
_UpperCamelCase : str = processed
else:
_UpperCamelCase : Any = list(processed.keys() )[0]
_UpperCamelCase : Tuple = processed[key]
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Dict = len(lowerCamelCase__ )
else:
_UpperCamelCase : Tuple = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase : Any = observed_batch_size
_UpperCamelCase : List[Any] = processed
_UpperCamelCase : int = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase : List[Any] = self.loader_batch_item()
_UpperCamelCase : Optional[Any] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
if is_last:
return accumulator
else:
_UpperCamelCase : Any = processed
_UpperCamelCase : List[Any] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
return accumulator
class lowercase__ ( lowercase ):
def __init__( self : Tuple ,lowerCamelCase__ : Dataset ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : int = dataset
_UpperCamelCase : str = key
def __len__( self : Dict ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : Tuple ,lowerCamelCase__ : Tuple ):
'''simple docstring'''
return self.dataset[i][self.key]
class lowercase__ ( lowercase ):
def __init__( self : List[Any] ,lowerCamelCase__ : Dataset ,lowerCamelCase__ : str ,lowerCamelCase__ : str ):
'''simple docstring'''
_UpperCamelCase : int = dataset
_UpperCamelCase : Optional[Any] = keya
_UpperCamelCase : str = keya
def __len__( self : List[Any] ):
'''simple docstring'''
return len(self.dataset )
def __getitem__( self : List[str] ,lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 83 | 0 |
import os
import pytest
from attr import dataclass
A__ = 'us-east-1' # defaults region
@dataclass
class __lowerCAmelCase :
__lowerCamelCase = 42
__lowerCamelCase = '''arn:aws:iam::558105141721:role/sagemaker_execution_role'''
__lowerCamelCase = {
'''task_name''': '''mnli''',
'''per_device_train_batch_size''': 16,
'''per_device_eval_batch_size''': 16,
'''do_train''': True,
'''do_eval''': True,
'''do_predict''': True,
'''output_dir''': '''/opt/ml/model''',
'''overwrite_output_dir''': True,
'''max_steps''': 500,
'''save_steps''': 5_500,
}
__lowerCamelCase = {**hyperparameters, '''max_steps''': 1_000}
@property
def snake_case ( self ):
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def snake_case ( self ):
"""simple docstring"""
return F'{self.framework}-transformers-test'
@property
def snake_case ( self ):
"""simple docstring"""
return F'./tests/sagemaker/scripts/{self.framework}'
@property
def snake_case ( self ):
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = SageMakerTestEnvironment(framework=request.cls.framework )
| 82 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
snake_case_ : Any = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def A__ ( ):
_UpperCamelCase : Tuple = Github(os.environ['GITHUB_TOKEN'] )
_UpperCamelCase : List[Any] = g.get_repo('huggingface/diffusers' )
_UpperCamelCase : List[Any] = repo.get_issues(state='open' )
for issue in open_issues:
_UpperCamelCase : Dict = sorted(issue.get_comments() , key=lambda UpperCAmelCase_ : i.created_at , reverse=UpperCAmelCase_ )
_UpperCamelCase : List[str] = comments[0] if len(UpperCAmelCase_ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 83 | 0 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCAmelCase = 2_048
UpperCAmelCase = 4_096
UpperCAmelCase = 42
UpperCAmelCase = os.environ.pop("""PROCESS_TRAIN""", """false""")
UpperCAmelCase = {'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4}
def lowercase ( a__ : Optional[int] ) -> int:
def choose_first(a__ : Dict , a__ : Any=False ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) == 1:
_UpperCamelCase = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
_UpperCamelCase = {k: [a[k]] for k in a}
if len(a['''start_token'''] ) > 0:
break
return a
_UpperCamelCase = {'id': example['id']}
_UpperCamelCase = example['annotations']
_UpperCamelCase = annotation['yes_no_answer']
if 0 in yes_no_answer or 1 in yes_no_answer:
_UpperCamelCase = ['yes'] if 1 in yes_no_answer else ['no']
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = ['<cls>']
else:
_UpperCamelCase = ['short']
_UpperCamelCase = choose_first(annotation['''short_answers'''] )
if len(out['''start_token'''] ) == 0:
# answer will be long if short is not available
_UpperCamelCase = ['long']
_UpperCamelCase = choose_first(annotation['''long_answer'''] , is_long_answer=UpperCAmelCase_ )
_UpperCamelCase = []
answer.update(UpperCAmelCase_ )
# disregard some samples
if len(answer['''start_token'''] ) > 1 or answer["start_token"] == answer["end_token"]:
_UpperCamelCase = True
else:
_UpperCamelCase = False
_UpperCamelCase = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
if not all(isinstance(answer[k] , UpperCAmelCase_ ) for k in cols ):
raise ValueError('''Issue in ID''' , example['''id'''] )
return answer
def lowercase ( a__ : Optional[int] , a__ : List[str]=False ) -> str:
_UpperCamelCase = _get_single_answer(UpperCAmelCase_ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
_UpperCamelCase = example['document']['tokens']
_UpperCamelCase = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
return {
"context": " ".join(UpperCAmelCase_ ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
_UpperCamelCase = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
_UpperCamelCase = example['document']['tokens']
_UpperCamelCase = answer['start_token']
_UpperCamelCase = answer['end_token']
_UpperCamelCase = []
for i in range(len(doc['''token'''] ) ):
if not doc["is_html"][i]:
context.append(doc['''token'''][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
_UpperCamelCase = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
_UpperCamelCase = doc['is_html'][answer['start_token'] : answer['end_token']]
_UpperCamelCase = doc['token'][answer['start_token'] : answer['end_token']]
_UpperCamelCase = ' '.join([old[i] for i in range(len(UpperCAmelCase_ ) ) if not is_html[i]] )
if new != old:
print('''ID:''' , example['''id'''] )
print('''New:''' , UpperCAmelCase_ , end='''\n''' )
print('''Old:''' , UpperCAmelCase_ , end='''\n\n''' )
return {
"context": " ".join(UpperCAmelCase_ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    # overlap will be of doc_stride - q_len
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(category),
                "end_token": [-100] * len(category),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # skip samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # randomly dropping ~60% of the "null" samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCAmelCase = load_dataset("""natural_questions""")
UpperCAmelCase = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
UpperCAmelCase = data['train' if PROCESS_TRAIN == 'true' else 'validation']
UpperCAmelCase = {
'tokenizer': tokenizer,
'doc_stride': DOC_STRIDE,
'max_length': MAX_LENGTH,
'assertion': False,
}
UpperCAmelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCAmelCase = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
UpperCAmelCase = 'nq-training.jsonl' if PROCESS_TRAIN == 'true' else 'nq-validation.jsonl'
save_to_disk(data, file_name=cache_file_name)
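
# ---------------------------------------------------------------------------
# Added illustration (not part of the original script): a minimal, self-
# contained sketch of the strided-window chunking used above. The helper name
# and the toy token ids are made up; it only demonstrates how
# `range(q_len, len(ids), max_length - doc_stride)` yields overlapping windows
# that each keep the question prefix.
def _demo_strided_windows(input_ids, q_len, max_length, doc_stride):
    q_indices = input_ids[:q_len]
    windows = []
    for i in range(q_len, len(input_ids), max_length - doc_stride):
        windows.append(q_indices + input_ids[i : i + max_length - q_len])
    return windows


assert _demo_strided_windows(list(range(12)), q_len=2, max_length=6, doc_stride=4) == [
    [0, 1, 2, 3, 4, 5],
    [0, 1, 4, 5, 6, 7],
    [0, 1, 6, 7, 8, 9],
    [0, 1, 8, 9, 10, 11],
    [0, 1, 10, 11],
]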
| 256 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        '''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        '''simple docstring'''
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        '''simple docstring'''
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 83 | 0 |
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()

    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
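
# Example invocation (added, illustrative only; the script name and paths are
# placeholders):
#   python <this_script>.py \
#       --checkpoint_path ./sd-v1-4.ckpt \
#       --original_config_file ./v1-inference.yaml \
#       --scheduler_type ddim \
#       --dump_path ./sd-v1-4-diffusers \
#       --half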
| 167 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/xprophetnet-large-wiki100-cased': (
        'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
    @property
    def num_hidden_layers(self) -> int:
        '''simple docstring'''
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        '''simple docstring'''
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.' )
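
# Added usage sketch (illustrative values, not from the original file): the
# config can be built with a smaller layout than the defaults.
if __name__ == "__main__":
    config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
    print(config.num_hidden_layers)  # 12 -- the sum of encoder and decoder layers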
| 83 | 0 |
'''simple docstring'''
def equation(x: float) -> float:
    """simple docstring"""
    return 10 - x * x
def bisection(a: float, b: float) -> float:
    """simple docstring"""
    # Bolzano's theorem: a sign change on [a, b] is required for a root
    if equation(a) * equation(b) >= 0:
        raise ValueError('Wrong space!')

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
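
# Added note: with tolerance t, bisection halves the bracket [a, b] each
# iteration, so it needs about log2((b - a) / t) steps. The helper below is
# illustrative only (not part of the original solution).
import math


def _demo_bisection_steps(a: float, b: float, tol: float = 0.01) -> int:
    """Upper bound on the number of bisection iterations for a given bracket."""
    return math.ceil(math.log2((b - a) / tol))


assert _demo_bisection_steps(-2, 5) == 10  # log2(700) ~ 9.45, rounded up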
| 53 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            # unreachable in practice: multiples of 15 already match the first branch
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 83 | 0 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)
@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)
def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES)

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)
        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
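
# Note (added): `get_duration` comes from the local `utils` module that is not
# shown here. A plausible implementation -- an assumption, not the actual
# utils code -- would time the wrapped call and return the elapsed seconds:
#
#   import functools, time
#
#   def get_duration(func):
#       @functools.wraps(func)
#       def wrapper(*args, **kwargs):
#           starttime = time.time()
#           func(*args, **kwargs)
#           return time.time() - starttime
#       return wrapper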
| 128 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 83 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)
def A__ ( self):
lowercase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
lowercase = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return VideoMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase__ ,initializer_range=self.initializer_range ,)
def A__ ( self ,A__ ,A__ ,A__):
lowercase = VideoMAEModel(config=lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
lowercase = model(lowerCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
def A__ ( self ,A__ ,A__ ,A__):
lowercase = VideoMAEForPreTraining(lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase = torch.ones((self.num_masks,))
lowercase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
lowercase = mask.expand(self.batch_size ,-1).bool()
lowercase = model(lowerCamelCase__ ,lowerCamelCase__)
# model only returns predictions for masked patches
lowercase = mask.sum().item()
lowercase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels))
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
lowercase = config_and_inputs
lowercase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
def A__ ( self):
lowercase = VideoMAEModelTester(self)
lowercase = ConfigTester(self ,config_class=lowerCamelCase__ ,has_text_modality=lowerCamelCase__ ,hidden_size=3_7)
def A__ ( self ,A__ ,A__ ,A__=False):
lowercase = copy.deepcopy(lowerCamelCase__)
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase = torch.ones((self.model_tester.num_masks,))
lowercase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
lowercase = mask.expand(self.model_tester.batch_size ,-1).bool()
lowercase = bool_masked_pos.to(lowerCamelCase__)
if return_labels:
if model_class in [
*get_values(lowerCamelCase__),
]:
lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase__)
return inputs_dict
def A__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''')
def A__ ( self):
pass
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(lowerCamelCase__)
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ ,nn.Linear))
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(lowerCamelCase__)
lowercase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ['pixel_values']
self.assertListEqual(arg_names[:1] ,lowerCamelCase__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__)
@slow
def A__ ( self):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = VideoMAEModel.from_pretrained(lowerCamelCase__)
self.assertIsNotNone(lowerCamelCase__)
def A__ ( self):
if not self.has_attentions:
pass
else:
lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
for model_class in self.all_model_classes:
lowercase = self.model_tester.seq_length - self.model_tester.num_masks
lowercase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase = True
lowercase = False
lowercase = True
lowercase = model_class(lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__))
lowercase = outputs.attentions
self.assertEqual(len(lowerCamelCase__) ,self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase = True
lowercase = model_class(lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__))
lowercase = outputs.attentions
self.assertEqual(len(lowerCamelCase__) ,self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
lowercase = len(lowerCamelCase__)
# Check attention is always last and order is fine
lowercase = True
lowercase = True
lowercase = model_class(lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__))
self.assertEqual(out_len + 1 ,len(lowerCamelCase__))
lowercase = outputs.attentions
self.assertEqual(len(lowerCamelCase__) ,self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def A__ ( self):
def check_hidden_states_output(A__ ,A__ ,A__):
lowercase = model_class(lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__))
lowercase = outputs.hidden_states
lowercase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCamelCase__) ,lowerCamelCase__)
lowercase = self.model_tester.seq_length - self.model_tester.num_masks
lowercase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) ,[seq_length, self.model_tester.hidden_size] ,)
lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def A__ ( self):
pass
def UpperCamelCase ( ):
'''simple docstring'''
lowercase = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowercase = np.load(UpperCAmelCase_ )
return list(UpperCAmelCase_ )
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def A__ ( self):
lowercase = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''').to(
lowerCamelCase__)
lowercase = self.default_image_processor
lowercase = prepare_video()
lowercase = image_processor(lowerCamelCase__ ,return_tensors='''pt''').to(lowerCamelCase__)
# forward pass
with torch.no_grad():
lowercase = model(**lowerCamelCase__)
# verify the logits
lowercase = torch.Size((1, 4_0_0))
self.assertEqual(outputs.logits.shape ,lowerCamelCase__)
lowercase = torch.tensor([0.3669, -0.0688, -0.2421]).to(lowerCamelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,lowerCamelCase__ ,atol=1E-4))
@slow
def A__ ( self):
lowercase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''').to(lowerCamelCase__)
lowercase = self.default_image_processor
lowercase = prepare_video()
lowercase = image_processor(lowerCamelCase__ ,return_tensors='''pt''').to(lowerCamelCase__)
# add boolean mask, indicating which patches to mask
lowercase = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' ,filename='''bool_masked_pos.pt''')
lowercase = torch.load(lowerCamelCase__)
# forward pass
with torch.no_grad():
lowercase = model(**lowerCamelCase__)
# verify the logits
lowercase = torch.Size([1, 1_4_0_8, 1_5_3_6])
lowercase = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ,device=lowerCamelCase__)
self.assertEqual(outputs.logits.shape ,lowerCamelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,lowerCamelCase__ ,atol=1E-4))
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase = torch.tensor([0.5142] ,device=lowerCamelCase__)
self.assertTrue(torch.allclose(outputs.loss ,lowerCamelCase__ ,atol=1E-4))
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ,norm_pix_loss=lowerCamelCase__).to(
lowerCamelCase__)
with torch.no_grad():
lowercase = model(**lowerCamelCase__)
lowercase = torch.tensor(torch.tensor([0.6469]) ,device=lowerCamelCase__)
self.assertTrue(torch.allclose(outputs.loss ,lowerCamelCase__ ,atol=1E-4))
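
# Added note (not part of the original test file): the sequence length used
# throughout the tester follows directly from the tubelet/patch geometry.
# With the tester defaults above (image_size=10, patch_size=2, num_frames=2,
# tubelet_size=2, mask_ratio=0.9):
def _demo_videomae_seq_length(image_size=10, patch_size=2, num_frames=2, tubelet_size=2, mask_ratio=0.9):
    num_patches_per_frame = (image_size // patch_size) ** 2  # 5**2 = 25
    seq_length = (num_frames // tubelet_size) * num_patches_per_frame  # 1 * 25 = 25
    num_masks = int(mask_ratio * seq_length)  # int(22.5) = 22
    return seq_length, num_masks


assert _demo_videomae_seq_length() == (25, 22)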
| 101 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
    model_revision: str = field(
        default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
    use_auth_token: bool = field(
        default=False , metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        } , )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None , metadata={"help": "The input training data file (a text file)."} )
    validation_file: Optional[str] = field(
        default=None , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
    overwrite_cache: bool = field(
        default=False , metadata={"help": "Overwrite the cached training and evaluation sets"} )
    preprocessing_num_workers: Optional[int] = field(
        default=None , metadata={"help": "The number of processes to use for the preprocessing."} , )
    max_seq_length: Optional[int] = field(
        default=None , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    pad_to_max_length: bool = field(
        default=False , metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        } , )

    def __post_init__(self):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        '''simple docstring'''
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels , dtype=torch.int64)
        return batch
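
# Added illustration: the flatten/unflatten step performed by the collator
# above. A batch of B examples with 4 choices each is flattened to B * 4
# sequences so the tokenizer can pad them together, then viewed back as
# (B, 4, seq_len). The tensor below is a dummy stand-in for real input ids:
#
#   >>> import torch
#   >>> flat = torch.zeros(2 * 4, 7)   # B=2 examples, 4 choices, 7 tokens each
#   >>> flat.view(2, 4, -1).shape
#   torch.Size([2, 4, 7])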
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag' , model_args , data_args )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.' )[-1]
        raw_datasets = load_dataset(
            extension , data_files=data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'ending{i}' for i in range(4 )]
    context_name = 'sent1'
    question_header_name = 'sent2'
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.' )
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
                f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
        max_seq_length = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
    def preprocess_function(examples ):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers )
        ]

        # Flatten out
        first_sentences = list(chain(*first_sentences ) )
        second_sentences = list(chain(*second_sentences ) )

        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences , second_sentences , truncation=True , max_length=max_seq_length , padding='max_length' if data_args.pad_to_max_length else False , )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0 , len(v ) , 4 )] for k, v in tokenized_examples.items()}
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset' )
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset ) , data_args.max_train_samples )
            train_dataset = train_dataset.select(range(max_train_samples ) )
        with training_args.main_process_first(desc='train dataset map pre-processing' ):
            train_dataset = train_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset' )
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset ) , data_args.max_eval_samples )
            eval_dataset = eval_dataset.select(range(max_eval_samples ) )
        with training_args.main_process_first(desc='validation dataset map pre-processing' ):
            eval_dataset = eval_dataset.map(
                preprocess_function , batched=True , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer , pad_to_multiple_of=8 if training_args.fp16 else None )
    )
# Metric
    def compute_metrics(eval_predictions ):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions , axis=1 )
        return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics['train_samples'] = min(max_train_samples , len(train_dataset ) )

        trainer.log_metrics('train' , metrics )
        trainer.save_metrics('train' , metrics )
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***' )

        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics['eval_samples'] = min(max_eval_samples , len(eval_dataset ) )

        trainer.log_metrics('eval' , metrics )
        trainer.save_metrics('eval' , metrics )
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs )
    else:
        trainer.create_model_card(**kwargs )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
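
# Example invocation (added, illustrative; all values are placeholders, the
# flags are standard HfArgumentParser/TrainingArguments options):
#   python run_swag.py \
#       --model_name_or_path bert-base-uncased \
#       --do_train --do_eval \
#       --per_device_train_batch_size 16 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 3 \
#       --output_dir /tmp/swag_output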
| 83 | 0 |
'''simple docstring'''
def create_ngram(sentence: str , ngram_size: int ) -> list[str]:
    """simple docstring"""
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
if __name__ == "__main__":
    from doctest import testmod

    testmod()
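
# Added usage example for the function above:
if __name__ == "__main__":
    assert create_ngram("hello", 3) == ["hel", "ell", "llo"]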
| 251 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."} , )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization."
                "Don't set if you want to train an encoder model from scratch."
            )
        } , )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization."
                "Don't set if you want to train a decoder model from scratch."
            )
        } , )
    encoder_config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"} )
    decoder_config_name: Optional[str] = field(
        default=None , metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"} )
def main():
    parser = HfArgumentParser((ModelArguments,) )
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name )
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name )
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=encoder_config , decoder_config=decoder_config , )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )

    model.save_pretrained(model_args.output_dir )
    image_processor.save_pretrained(model_args.output_dir )
    tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
    main()
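
# Example invocation (added, illustrative; the script name is assumed and the
# model ids are just examples of a vision encoder and a causal-LM decoder):
#   python <this_script>.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2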
| 83 | 0 |
"""simple docstring"""
from __future__ import annotations
def a__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Dict = set(UpperCAmelCase_ ), [start]
while stack:
lowerCAmelCase : List[Any] = stack.pop()
explored.add(UpperCAmelCase_ )
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v] ):
if adj not in explored:
stack.append(UpperCAmelCase_ )
return explored
lowerCAmelCase__ = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
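
# Added for comparison (not part of the original module): the same traversal
# written recursively; `visited` plays the role of `explored` above.
def depth_first_search_recursive(graph: dict, node: str, visited: set | None = None) -> set:
    if visited is None:
        visited = set()
    visited.add(node)
    for adj in graph[node]:
        if adj not in visited:
            depth_first_search_recursive(graph, adj, visited)
    return visited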
| 108 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self , feature_size: int , sampling_rate: int , padding_value: float , **kwargs ):
        '''simple docstring'''
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop('padding_side' , 'right' )
        self.return_attention_mask = kwargs.pop('return_attention_mask' , True )

        super().__init__(**kwargs )
    def pad(
        self ,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ] ,
        padding: Union[bool, str, PaddingStrategy] = True ,
        max_length: Optional[int] = None ,
        truncation: bool = False ,
        pad_to_multiple_of: Optional[int] = None ,
        return_attention_mask: Optional[bool] = None ,
        return_tensors: Optional[Union[str, TensorType]] = None ,
    ) -> BatchFeature:
        '''simple docstring'''
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
                F' to this method that includes {self.model_input_names[0]}, but you provided'
                F' {list(processed_features.keys() )}' )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch

        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element ):
                return_tensors = 'pt'
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = 'np'
            else:
                raise ValueError(
                    F'type of {first_element} unknown: {type(first_element )}. '
                    'Should be one of a python, numpy, pytorch or tensorflow object.' )

        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError('Some items in the output dictionary have a different batch size than others.' )

        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation ,)
            truncated_inputs.append(inputs_slice )

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask ,)

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )

        return BatchFeature(batch_outputs , tensor_type=return_tensors )
def _pad(self, processed_features: Union[Dict[str, np.ndarray], BatchFeature], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None):
    """Pads a single example (dict of numpy arrays) up to `max_length`."""
    required_input = processed_features[self.model_input_names[0]]
    if padding_strategy == PaddingStrategy.LONGEST:
        max_length = len(required_input)
    if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
    if return_attention_mask and "attention_mask" not in processed_features:
        processed_features['attention_mask'] = np.ones(len(required_input), dtype=np.int32)
    if needs_to_be_padded:
        difference = max_length - len(required_input)
        if self.padding_side == "right":
            if return_attention_mask:
                processed_features['attention_mask'] = np.pad(
                    processed_features['attention_mask'], (0, difference))
            padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
            processed_features[self.model_input_names[0]] = np.pad(
                required_input, padding_shape, 'constant', constant_values=self.padding_value)
        elif self.padding_side == "left":
            if return_attention_mask:
                processed_features['attention_mask'] = np.pad(
                    processed_features['attention_mask'], (difference, 0))
            padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
            processed_features[self.model_input_names[0]] = np.pad(
                required_input, padding_shape, 'constant', constant_values=self.padding_value)
        else:
            raise ValueError('Invalid padding strategy:' + str(self.padding_side))
    return processed_features
def _truncate(self, processed_features: Union[Dict[str, np.ndarray], BatchFeature], max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, truncation: Optional[bool] = None):
    """Truncates a single example (dict of numpy arrays) down to `max_length`."""
    if not truncation:
        return processed_features
    elif truncation and max_length is None:
        raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')
    required_input = processed_features[self.model_input_names[0]]
    # find `max_length` that fits `pad_to_multiple_of`
    if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    needs_to_be_truncated = len(required_input) > max_length
    if needs_to_be_truncated:
        processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
        if "attention_mask" in processed_features:
            processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]
    return processed_features
def _get_padding_strategies(self, padding=False, max_length=None):
    """Resolves the user-facing `padding` argument into a `PaddingStrategy` member."""
    # Get padding strategy
    if padding is not False:
        if padding is True:
            padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
        elif not isinstance(padding, PaddingStrategy):
            padding_strategy = PaddingStrategy(padding)
        elif isinstance(padding, PaddingStrategy):
            padding_strategy = padding
    else:
        padding_strategy = PaddingStrategy.DO_NOT_PAD
    # Set max length if needed
    if max_length is None:
        if padding_strategy == PaddingStrategy.MAX_LENGTH:
            raise ValueError(
                f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined')
    # Test if we have a padding value
    if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
        raise ValueError(
            'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
            ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.')
    return padding_strategy
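# A minimal usage sketch for the padding utilities above. The feature-extractor
# class and the concrete values here are illustrative assumptions, not part of
# the original code:
#
#   feature_extractor = MyFeatureExtractor(feature_size=1, padding_value=0.0)  # hypothetical subclass
#   batch = [{"input_values": [0.1, 0.2, 0.3]}, {"input_values": [0.4, 0.5]}]
#   padded = feature_extractor.pad(batch, padding=True, return_attention_mask=True, return_tensors="np")
#   # padded["input_values"].shape == (2, 3): the shorter example is padded with
#   # `padding_value`, and padded["attention_mask"] marks the real positions.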
| 83 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (('num_inference_steps', 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample
                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        # covered by the check_over_* helpers above
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        # Note: the mangled original rebuilt a default scheduler unconditionally,
        # which silently discarded any `scheduler` passed by the caller; the
        # rebuild is kept only for the `scheduler is None` case here.
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]
            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives identical results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="deis", solver_order=order, solver_type=solver_type, )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample
        assert sample.dtype == torch.float16
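# A minimal sketch of driving a multistep scheduler such as the one tested
# above, outside of the test harness. The random tensor below is a stand-in
# assumption for the output of a real denoising model:
#
#   import torch
#   from diffusers import DEISMultistepScheduler
#
#   demo_scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#   demo_scheduler.set_timesteps(10)
#   demo_sample = torch.randn(1, 3, 8, 8)
#   for t in demo_scheduler.timesteps:
#       residual = torch.randn_like(demo_sample)  # placeholder for model(sample, t)
#       demo_sample = demo_scheduler.step(residual, t, demo_sample).prev_sample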
| 120 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Coefficients are given in order of ascending degree: coefficients[0] is the constant term."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.')
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree
    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        """Polynomial addition: copy the longer coefficient list and add termwise."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)
    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        """Polynomial subtraction, implemented as addition with the negation."""
        return self + polynomial_2 * Polynomial(0, [-1])
    def __neg__(self) -> Polynomial:
        """Polynomial negation."""
        return Polynomial(self.degree, [-c for c in self.coefficients])
    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        """Polynomial multiplication via the convolution of the coefficient lists."""
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)
    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluates the polynomial at x = substitution."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__(self) -> str:
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial
    def __repr__(self) -> str:
        return self.__str__()
    def derivative(self) -> Polynomial:
        """Returns the derivative of the polynomial."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)
    def integral(self, constant_of_integration: int | float = 0) -> Polynomial:
        """Returns the antiderivative of the polynomial with the given constant of integration."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant_of_integration
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)
    def __eq__(self, polynomial_2: object) -> bool:
        """Checks if two polynomials are equal."""
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True
    def __ne__(self, polynomial_2: object) -> bool:
        """Checks if two polynomials are not equal."""
        return not self.__eq__(polynomial_2)
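# A brief usage sketch of the Polynomial class above (the values are illustrative):
if __name__ == "__main__":
    p = Polynomial(2, [0, 0, 1])  # represents x^2
    q = Polynomial(1, [0, 2])     # represents 2x
    print(p + q)                  # 1x^2 + 2x
    print(p.evaluate(3))          # 9
    print(p.derivative())         # 2x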
| 83 | 0 |
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Returns the d <= `digit` for which the unit fraction numerator/d has the
    longest recurring cycle in its decimal fraction part (Project Euler 26)."""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
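# Usage sketch (the expected value is the well-known answer to Project Euler 26):
#
#   >>> solution(1, 1000)
#   983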
| 72 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode', result.stderr.decode().replace('\n', ''), )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
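# All tests above share one trick: network access is cut off by monkey-patching
# `socket.socket` inside a fresh interpreter, so that TRANSFORMERS_OFFLINE is
# read at import time. A standalone sketch of the mocking idea (no transformers
# involved):
#
#   import socket
#
#   def offline_socket(*args, **kwargs):
#       raise RuntimeError("network access disabled for this process")
#
#   socket.socket = offline_socket  # any later connection attempt now raises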
| 83 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased"):
    """Builds MRPC train/eval dataloaders for the given tokenizer checkpoint."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)
    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)
    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''', eval_metric)
        performance_metric[F'''epoch-{epoch}'''] = eval_metric['accuracy']
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric['accuracy']
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path', type=str, default='bert-base-cased', help='Path to pretrained model or model identifier from huggingface.co/models.', required=False, )
    parser.add_argument(
        '--output_dir', type=str, default='.', help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.', )
    parser.add_argument(
        '--performance_lower_bound', type=float, default=None, help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.', )
    parser.add_argument(
        '--num_epochs', type=int, default=3, help='Number of train epochs.', )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
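# Launch note: assuming this script is saved as, say, `train_mrpc.py` (the file
# name is an assumption), it is meant to be started through the accelerate CLI
# so that the DeepSpeed branches above are exercised, e.g.:
#
#   accelerate launch --config_file deepspeed_config.yaml train_mrpc.py \
#       --model_name_or_path bert-base-cased --num_epochs 3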
| 199 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True, )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased')
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 83 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
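# Usage sketch: with the lazy structure above, heavy submodules are imported
# only on first attribute access (and `modeling_trocr` only if torch is
# available). Assuming this file is `transformers/models/trocr/__init__.py`:
#
#   from transformers import TrOCRConfig, TrOCRProcessor  # cheap, no torch needed
#   from transformers import TrOCRForCausalLM             # triggers the lazy torch import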
| 82 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }
    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained(cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path, speaker_embeddings_dict_path, subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', False), local_files_only=kwargs.pop('local_files_only', False), use_auth_token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None), )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.')
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
    def save_pretrained(self, save_directory, speaker_embeddings_dict_path="speaker_embeddings_path.json", speaker_embeddings_directory="speaker_embeddings", push_to_hub: bool = False, **kwargs, ):
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, 'v2'), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict['repo_or_path'] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict['repo_or_path'], speaker_embeddings_directory, f'{prompt_key}_{key}'), voice_preset[key], allow_pickle=False, )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f'{prompt_key}_{key}.npy')
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), 'w') as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)
    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].')
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path', '/'), voice_preset_paths[key], subfolder=kwargs.pop('subfolder', None), cache_dir=kwargs.pop('cache_dir', None), force_download=kwargs.pop('force_download', False), proxies=kwargs.pop('proxies', None), resume_download=kwargs.pop('resume_download', False), local_files_only=kwargs.pop('local_files_only', False), use_auth_token=kwargs.pop('use_auth_token', None), revision=kwargs.pop('revision', None), )
            if path is None:
                raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"), voice_preset_paths[key])}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.')
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict
    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'Voice preset unrecognized, missing {key} as a key.')
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.')
    def __call__(self, text=None, voice_preset=None, return_tensors="pt", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, **kwargs, ):
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith('.npz'):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text, return_tensors=return_tensors, padding='max_length', max_length=max_length, return_attention_mask=return_attention_mask, return_token_type_ids=return_token_type_ids, add_special_tokens=add_special_tokens, **kwargs, )
        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset
        return encoded_text
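# A short usage sketch for the processor above. The checkpoint and voice preset
# ids follow the public Bark release, but treat them as assumptions here:
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` holds the tokenized text plus a "history_prompt" BatchFeature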
| 83 | 0 |
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 256 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2Config, Wav2Vec2FeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float tensor of the given shape as nested Python lists."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class Wav2Vec2FeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, return_attention_mask=True, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Wav2Vec2FeatureExtractor
    def setUp(self):
        self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors='np').input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors='np').input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors='np').input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors='np')
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]
        paddings = ['longest', 'max_length', 'do_not_pad']
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding='max_length', return_tensors='np')
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
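        # In other words: with truncation=True and padding="longest", the batch is
        # padded (or truncated) to min(longest_sample_length, max_length), which is
        # exactly what the two shape assertions above exercise.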
    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    @slow
    @require_torch
    def test_pretrained_checkpoints_are_set_correctly(self):
        # this test makes sure that models that are using
        # group norm don't have their feature extractor return the
        # attention_mask
        for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
            config = Wav2Vec2Config.from_pretrained(model_id)
            feat_extract = Wav2Vec2FeatureExtractor.from_pretrained(model_id)

            # only "layer" feature extraction norm should make use of
            # attention_mask
            self.assertEqual(feat_extract.return_attention_mask, config.feat_extract_norm == "layer")
| 83 | 0 |
"""simple docstring"""
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
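# These are standard pytest hooks: `pytest_addoption` registers the shared
# `--make-reports` option and `pytest_terminal_summary` writes the report files
# when it is passed, e.g. (illustrative invocation): pytest --make-reports=run1 tests/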
| 167 |
"""
Find the divisor below ``digit`` whose unit fraction 1/d has the longest
recurring cycle in its decimal expansion (Project Euler problem 26 style).
"""


def solution(numerator: int = 1, digit: int = 1000) -> int:
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit
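# For example, solution(1, 10) returns 7: among 1/d for d <= 10,
# 1/7 = 0.(142857) has the longest recurring cycle (6 digits).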
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Supports batches of {"image": ..., "question": ...} dicts
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
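# Illustrative usage sketch (the checkpoint is resolved by the task default,
# and the exact scores/answers are model-dependent):
#
#   from transformers import pipeline
#
#   vqa = pipeline("visual-question-answering")
#   vqa(image="path/to/image.png", question="How many cats are there?", top_k=2)
#   # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]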
| 53 |
def is_palindrome(num: int) -> bool:
    """Return True if ``num`` reads the same forwards and backwards."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
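# For example: is_palindrome(121) is True, is_palindrome(123) is False, and
# is_palindrome(-121) is False (the sign breaks the symmetry).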
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
from PIL import Image
def change_contrast(img: Image.Image, level: int) -> Image.Image:
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
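# The factor is 1.0 at level=0 (identity mapping) and grows as level approaches
# 259, pushing pixel values away from the mid-gray point 128.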
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 128 |
def sum_of_digits(n: int) -> int:
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))
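# All three implementations agree, e.g.
# sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15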
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 83 | 0 |
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False
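# Example: PrefixSum([1, 2, 3]).get_sum(0, 2) == 6 and
# PrefixSum([1, 2, 3]).contains_sum(5) is True (the subarray [2, 3] sums to 5).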
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 |
from math import pi


def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)
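# e.g. arc_length(90, 10) = 2 * pi * 10 * (90 / 360) ~ 15.708, a quarter of the
# full circumference.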
if __name__ == "__main__":
print(arc_length(90, 10))
| 83 | 0 |
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve two linear equations a*x + b*y = c, each given as [a, b, c], via Cramer's rule."""
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")

    if determinant_x == determinant_y == 0:
        # Trivial solution (Inconsistent system)
        return (0.0, 0.0)

    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)
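# Worked example: cramers_rule_2x2([11, 2, 30], [1, 0, 4]) == (4.0, -7.0),
# since 11*4 + 2*(-7) = 30 and 1*4 + 0*(-7) = 4.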
| 251 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
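# Illustrative usage sketch (values chosen arbitrarily):
#   config = MvpConfig(d_model=512, encoder_layers=6, decoder_layers=6)
#   config.hidden_size  # -> 512, resolved through `attribute_map` above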
| 83 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
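# e.g. rename_key("module.v.blocks.0.attn.proj.weight")
# -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"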
def convert_state_dict(orig_state_dict, config):
    # the fused qkv weights are split into separate query/key/value projections;
    # the destination key names below follow the upstream conversion script.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a model's weights to our Audio Spectrogram Transformer structure.
    """
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
'ast-finetuned-audioset-10-10-0.4593': (
'https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.450': (
'https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448': (
'https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'
),
'ast-finetuned-audioset-10-10-0.448-v2': (
'https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'
),
'ast-finetuned-audioset-12-12-0.447': (
'https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'
),
'ast-finetuned-audioset-14-14-0.443': (
'https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'
),
'ast-finetuned-audioset-16-16-0.442': (
'https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'
),
'ast-finetuned-speech-commands-v2': (
'https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''ast-finetuned-audioset-10-10-0.4593''',
type=str,
help='''Name of the Audio Spectrogram Transformer model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 108 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
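# Illustrative usage sketch (the audio path is hypothetical):
#   tool = SpeechToTextTool()
#   text = tool("path/to/audio.flac")  # returns the transcribed string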
| 83 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__A : Dict = logging.getLogger(__name__)
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'summarization'
lowercase = ['loss']
lowercase = ROUGE_KEYS
lowercase = 'rouge2'
def __init__( self : Any , lowerCamelCase : Dict , **lowerCamelCase : Optional[Any] ) -> str:
if hparams.sortish_sampler and hparams.gpus > 1:
lowerCAmelCase_ : Optional[Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(lowerCamelCase__ , num_labels=lowerCamelCase__ , mode=self.mode , **lowerCamelCase__ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
lowerCAmelCase_ : Any = Path(self.output_dir ) / 'metrics.json'
lowerCAmelCase_ : int = Path(self.output_dir ) / 'hparams.pkl'
pickle_save(self.hparams , self.hparams_save_path )
lowerCAmelCase_ : Optional[Any] = 0
lowerCAmelCase_ : int = defaultdict(lowerCamelCase__ )
lowerCAmelCase_ : Dict = self.config.model_type
lowerCAmelCase_ : List[str] = self.config.tgt_vocab_size if self.model_type == 'fsmt' else self.config.vocab_size
lowerCAmelCase_ : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
lowerCAmelCase_ : Optional[int] = {
'train': self.hparams.n_train,
'val': self.hparams.n_val,
'test': self.hparams.n_test,
}
lowerCAmelCase_ : Dict = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
lowerCAmelCase_ : List[str] = {
'train': self.hparams.max_target_length,
'val': self.hparams.val_max_target_length,
'test': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F'target_lens: {self.target_lens}'
assert self.target_lens["train"] <= self.target_lens["test"], F'target_lens: {self.target_lens}'
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
lowerCAmelCase_ : int = get_git_info()['repo_sha']
lowerCAmelCase_ : List[str] = hparams.num_workers
lowerCAmelCase_ : Dict = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCamelCase__ ):
lowerCAmelCase_ : Optional[Any] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
lowerCAmelCase_ : int = self.decoder_start_token_id
lowerCAmelCase_ : str = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : int = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
lowerCAmelCase_ : Any = self.hparams.eval_max_gen_length
else:
lowerCAmelCase_ : List[str] = self.model.config.max_length
lowerCAmelCase_ : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def __lowercase ( self : Dict , lowerCamelCase : Dict[str, torch.Tensor] ) -> List[str]:
lowerCAmelCase_ : Optional[Any] = {
k: self.tokenizer.batch_decode(v.tolist() ) if 'mask' not in k else v.shape for k, v in batch.items()
}
save_json(lowerCamelCase__ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
lowerCAmelCase_ : Any = True
return readable_batch
def __lowercase ( self : Optional[int] , lowerCamelCase : str , **lowerCamelCase : int ) -> int:
return self.model(lowerCamelCase__ , **lowerCamelCase__ )
def __lowercase ( self : Tuple , lowerCamelCase : List[int] ) -> int:
lowerCAmelCase_ : List[Any] = self.tokenizer.batch_decode(
lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )
return lmap(str.strip , lowerCamelCase__ )
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
@property
def __lowercase ( self : int ) -> Union[str, Any]:
return self.tokenizer.pad_token_id
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : int ) -> Optional[int]:
lowerCAmelCase_ : Union[str, Any] = self._step(lowerCamelCase__ )
lowerCAmelCase_ : List[Any] = dict(zip(self.loss_names , lowerCamelCase__ ) )
# tokens per batch
lowerCAmelCase_ : int = batch['input_ids'].ne(self.pad ).sum() + batch['labels'].ne(self.pad ).sum()
lowerCAmelCase_ : Any = batch['input_ids'].shape[0]
lowerCAmelCase_ : Optional[Any] = batch['input_ids'].eq(self.pad ).sum()
lowerCAmelCase_ : Tuple = batch['input_ids'].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def __lowercase ( self : Optional[int] , lowerCamelCase : Any , lowerCamelCase : List[str] ) -> Optional[Any]:
return self._generative_step(lowerCamelCase__ )
def __lowercase ( self : Tuple , lowerCamelCase : Dict , lowerCamelCase : Optional[Any]="val" ) -> Optional[int]:
self.step_count += 1
lowerCAmelCase_ : Any = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
lowerCAmelCase_ : Optional[int] = losses['loss']
lowerCAmelCase_ : Optional[Any] = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ['gen_time', 'gen_len']
}
lowerCAmelCase_ : Tuple = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
lowerCAmelCase_ : torch.FloatTensor = torch.tensor(lowerCamelCase__ ).type_as(lowerCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCamelCase__ )
lowerCAmelCase_ : Dict = {F'{prefix}_avg_{k}': x for k, x in losses.items()}
lowerCAmelCase_ : Tuple = self.step_count
self.metrics[prefix].append(lowerCamelCase__ ) # callback writes this to self.metrics_save_path
lowerCAmelCase_ : Optional[int] = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F'{prefix}_loss': loss,
F'{prefix}_{self.val_metric}': metric_tensor,
}
def __lowercase ( self : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[int] ) -> Any:
return calculate_rouge(lowerCamelCase__ , lowerCamelCase__ )
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"],
            attention_mask=batch["attention_mask"],
            use_cache=True,
            decoder_start_token_id=self.decoder_start_token_id,
            num_beams=self.eval_beams,
            max_length=self.eval_max_length,
        )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
def __lowercase ( self : Any , lowerCamelCase : Dict , lowerCamelCase : str ) -> int:
return self._generative_step(lowerCamelCase__ )
def __lowercase ( self : Tuple , lowerCamelCase : Dict ) -> List[str]:
return self.validation_epoch_end(lowerCamelCase__ , prefix="""test""" )
def __lowercase ( self : Any , lowerCamelCase : str ) -> Dict:
lowerCAmelCase_ : int = self.n_obs[type_path]
lowerCAmelCase_ : Optional[int] = self.target_lens[type_path]
lowerCAmelCase_ : int = self.dataset_class(
self.tokenizer , type_path=lowerCamelCase__ , n_obs=lowerCamelCase__ , max_target_length=lowerCamelCase__ , **self.dataset_kwargs , )
return dataset
def __lowercase ( self : int , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : bool = False ) -> List[str]:
lowerCAmelCase_ : Any = self.get_dataset(lowerCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
lowerCAmelCase_ : List[str] = dataset.make_sortish_sampler(lowerCamelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase__ , num_workers=self.num_workers , sampler=lowerCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
lowerCAmelCase_ : Union[str, Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCamelCase__ , batch_sampler=lowerCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCamelCase__ , batch_size=lowerCamelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCamelCase__ , num_workers=self.num_workers , sampler=lowerCamelCase__ , )
def __lowercase ( self : List[Any] ) -> Optional[int]:
lowerCAmelCase_ : Optional[Any] = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowerCamelCase__ )
return dataloader
def __lowercase ( self : List[str] ) -> Optional[int]:
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def __lowercase ( self : Union[str, Any] ) -> List[Any]:
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def __lowercase ( lowerCamelCase : Any , lowerCamelCase : Tuple ) -> Optional[int]:
BaseTransformer.add_model_specific_args(lowerCamelCase__ , lowerCamelCase__ )
add_generic_args(lowerCamelCase__ , lowerCamelCase__ )
parser.add_argument(
"""--max_source_length""" , default=10_24 , type=lowerCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=lowerCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=1_42 , type=lowerCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=1_42 , type=lowerCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowerCamelCase__ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowerCamelCase__ )
parser.add_argument("""--max_tokens_per_batch""" , type=lowerCamelCase__ , default=lowerCamelCase__ )
parser.add_argument("""--logger_name""" , type=lowerCamelCase__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=lowerCamelCase__ , default=5_00 , required=lowerCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=lowerCamelCase__ , default="""summarization""" , required=lowerCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=lowerCamelCase__ , default=0.0 , required=lowerCamelCase__ )
parser.add_argument("""--src_lang""" , type=lowerCamelCase__ , default="""""" , required=lowerCamelCase__ )
parser.add_argument("""--tgt_lang""" , type=lowerCamelCase__ , default="""""" , required=lowerCamelCase__ )
parser.add_argument("""--eval_beams""" , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ )
parser.add_argument(
"""--val_metric""" , type=lowerCamelCase__ , default=lowerCamelCase__ , required=lowerCamelCase__ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=lowerCamelCase__ , default=lowerCamelCase__ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=lowerCamelCase__ , default=1 , required=lowerCamelCase__ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=lowerCamelCase__ , default=-1 , required=lowerCamelCase__ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'translation'
lowercase = ['loss']
lowercase = ['bleu']
lowercase = 'bleu'
def __init__( self : Tuple , lowerCamelCase : List[Any] , **lowerCamelCase : List[Any] ) -> Union[str, Any]:
super().__init__(lowerCamelCase__ , **lowerCamelCase__ )
lowerCAmelCase_ : Tuple = hparams.src_lang
lowerCAmelCase_ : Tuple = hparams.tgt_lang
def __lowercase ( self : Dict , lowerCamelCase : Union[str, Any] , lowerCamelCase : str ) -> Optional[int]:
return calculate_bleu(lowerCamelCase__ , lowerCamelCase__ )
def UpperCamelCase_ ( A__ : List[Any] , A__ : Any=None ):
'''simple docstring'''
Path(args.output_dir ).mkdir(exist_ok=UpperCAmelCase_ )
check_output_dir(UpperCAmelCase_ , expected_items=3 )
if model is None:
if "summarization" in args.task:
lowerCAmelCase_ : SummarizationModule = SummarizationModule(UpperCAmelCase_ )
else:
lowerCAmelCase_ : SummarizationModule = TranslationModule(UpperCAmelCase_ )
lowerCAmelCase_ : str = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
lowerCAmelCase_ : Any = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
lowerCAmelCase_ : Optional[Any] = os.environ.get("""WANDB_PROJECT""" , UpperCAmelCase_ )
lowerCAmelCase_ : Any = WandbLogger(name=model.output_dir.name , project=UpperCAmelCase_ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
lowerCAmelCase_ : List[Any] = WandbLogger(name=model.output_dir.name , project=f'hf_{dataset}' )
if args.early_stopping_patience >= 0:
lowerCAmelCase_ : List[Any] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Optional[Any] = args.val_metric == 'loss'
lowerCAmelCase_ : pl.Trainer = generic_train(
UpperCAmelCase_ , UpperCAmelCase_ , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , UpperCAmelCase_ ) , early_stopping_callback=UpperCAmelCase_ , logger=UpperCAmelCase_ , )
pickle_save(model.hparams , model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
lowerCAmelCase_ : List[str] = ''
lowerCAmelCase_ : int = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=UpperCAmelCase_ ) )
if checkpoints:
lowerCAmelCase_ : Optional[Any] = checkpoints[-1]
lowerCAmelCase_ : Union[str, Any] = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__A : Any = argparse.ArgumentParser()
__A : Tuple = pl.Trainer.add_argparse_args(parser)
__A : Tuple = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__A : Optional[int] = parser.parse_args()
main(args)
| 120 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
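# Illustrative invocation (paths are hypothetical):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text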
| 83 | 0 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
@require_torch
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_lowerCamelCase : Any = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_lowerCamelCase : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase__ )
BertModel.from_pretrained(lowerCamelCase__ )
BertTokenizer.from_pretrained(lowerCamelCase__ )
pipeline(task='''fill-mask''' , model=lowerCamelCase__ )
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_lowerCamelCase : List[Any] = self.get_env()
_lowerCamelCase : Dict = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_lowerCamelCase : str = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_lowerCamelCase : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Optional[int] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_lowerCamelCase : Optional[Any] = self.get_env()
_lowerCamelCase : int = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
_lowerCamelCase : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase : Dict = '1'
_lowerCamelCase : Dict = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
_lowerCamelCase : int = '\nfrom transformers import pipeline\n '
_lowerCamelCase : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_lowerCamelCase : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_lowerCamelCase : Union[str, Any] = self.get_env()
_lowerCamelCase : List[Any] = '1'
_lowerCamelCase : Tuple = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_lowerCamelCase : int = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = '\nfrom transformers import AutoModel\n '
_lowerCamelCase : int = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_lowerCamelCase : Any = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_lowerCamelCase : Optional[Any] = self.get_env()
_lowerCamelCase : Optional[int] = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_lowerCamelCase : List[Any] = '1'
_lowerCamelCase : Dict = subprocess.run(lowerCamelCase__ , env=lowerCamelCase__ , check=lowerCamelCase__ , capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 72 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
_UpperCamelCase : List[Any] = [self.sep_token_id]
_UpperCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCamelCase_ ( self : List[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
_UpperCamelCase : int = [self.sep_token_id]
_UpperCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCamelCase : Dict = os.path.join(
lowerCamelCase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file ,lowerCamelCase__ )
return (out_vocab_file,)
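# A minimal sketch of the special-token layout produced above (token ids are toy values;
# the checkpoint name is just one of the supported Albert checkpoints listed earlier):
#   tok = AlbertTokenizerFast.from_pretrained('albert-base-v2')
#   tok.build_inputs_with_special_tokens([7, 8])            # -> [cls_id, 7, 8, sep_id]
#   tok.build_inputs_with_special_tokens([7, 8], [9])       # -> [cls_id, 7, 8, sep_id, 9, sep_id]
#   tok.create_token_type_ids_from_sequences([7, 8], [9])   # -> [0, 0, 0, 0, 1, 1]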
| 83 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'}
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}
    )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'}
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The maximum total input sequence length after tokenization. If passed, sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            'help': (
                'Whether to pad all samples to the maximum sentence length. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
                'efficient on GPU but very bad for TPU.'
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ['csv', 'json'], '`train_file` should be a csv or a json file.'
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ['csv', 'json'], '`validation_file` should be a csv or a json file.'
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt')
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels, dtype=torch.int64)
        return batch
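# Shape sketch (toy numbers, not from the original script): with batch_size=2 and
# num_choices=4, `tokenizer.pad` receives 8 flattened examples, and the `.view(2, 4, -1)`
# above restores the (batch, choice, seq_len) layout, so batch['input_ids'].shape
# comes back as torch.Size([2, 4, max_len]).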
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag', model_args, data_args)
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files['train'] = data_args.train_file
        if data_args.validation_file is not None:
            data_files['validation'] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag', 'regular', cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None)
    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'ending{i}' for i in range(4)]
    context_name = 'sent1'
    question_header_name = 'sent2'
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.')
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
                f' model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding='max_length' if data_args.pad_to_max_length else False)
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
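    # Regrouping sketch (placeholder values, not real token ids): for
    # v = [a, b, c, d, e, f, g, h], the slice expression above yields
    # [[a, b, c, d], [e, f, g, h]], i.e. one 4-choice group per original example.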
    if training_args.do_train:
        if 'train' not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache)
    if training_args.do_eval:
        if 'validation' not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache)
    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
# Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {'accuracy': (preds == label_ids).astype(np.float32).mean().item()}
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
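# Example invocation (illustrative; the file name and hyper-parameters are assumptions,
# not part of the original script):
#   python run_swag.py --model_name_or_path bert-base-uncased --do_train --do_eval \
#     --per_device_train_batch_size 16 --learning_rate 5e-5 --output_dir /tmp/swag_out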
| 199 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size
        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {'hidden_states', 'past_key_values', 'attentions'} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result
    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()
        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
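# Unrolling sketch (shapes are illustrative): with loader_batch_size=2 and an inferred
# batch {'logits': tensor of shape (2, n)}, two consecutive __next__ calls yield
# {'logits': tensor of shape (1, n)} items before the next loader batch is fetched.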
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated against.
            #
            # Another way to look at it is that we're basically flattening lists of lists
            # into a single list, but with generators.
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Extremely similar to PipelineIterator in its unpacking mechanism
        # BUT, we have an extra required item which is the presence of `is_last`.
        # Because everything is flattened by `PipelineChunkIterator`, we
        # need to keep track of how to regroup here in the original `process`
        # boundaries so that `process` and `postprocess` see the same data.
        # This iterator accumulates items (possibly while unbatching) until it
        # hits `is_last` and then just passes it on to the caller.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop('is_last')
                accumulator.append(item)
                if is_last:
                    return accumulator
        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop('is_last')
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop('is_last')
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {'text': self.dataset[i][self.key1], 'text_pair': self.dataset[i][self.key2]}
| 83 | 0 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        forward_params = {}
        postprocess_kwargs = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args (used during post-processing inside `_forward`)
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
    def preprocess(self, image, points_per_batch=64, crops_n_layers: int = 0, crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32, crop_n_points_downscale_factor: Optional[int] = 1):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. "
                "To return all points at once, set points_per_batch to None")
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95, mask_threshold=0, stability_score_offset=1):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0], pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset)
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 82 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'])
    repo = g.get_repo('huggingface/diffusers')
    open_issues = repo.get_issues(state='open')

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 83 | 0 |
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
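    # Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.70796.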
| 256 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), 'Tatoeba directory does not exist.')
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)
    @slow
    def test_resolver(self):
        self.resolver.convert_models(['heb-eng'])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card('opus-mt-he-en', dry_run=True)
        assert mmeta['long_pair'] == 'heb-eng'
| 83 | 0 |
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"])

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == "fp8"))
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
        " and an Nvidia Ampere GPU.")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
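# Launch sketch (the script file name is an assumption; flags depend on your hardware):
#   accelerate launch nlp_example.py                        # uses `accelerate config` settings
#   accelerate launch nlp_example.py --mixed_precision fp16
#   python nlp_example.py --cpu                             # plain single-CPU run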
| 167 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = 'xlm-prophetnet'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'num_encoder_attention_heads',
    }
    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = 'gelu',
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, add_cross_attention=add_cross_attention, decoder_start_token_id=decoder_start_token_id, **kwargs)

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.')
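# Usage sketch: `num_hidden_layers` is derived from both stacks, so with the defaults
# (12 encoder + 12 decoder layers) it reads back as 24, and assigning to it raises
# NotImplementedError:
#   config = XLMProphetNetConfig()
#   config.num_hidden_layers  # -> 24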
| 83 | 0 |
'''simple docstring'''
import numpy as np
class Cell:
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0

    def __eq__(self, cell):
        return self.position == cell.position

    def showcell(self):
        print(self.position)


class Gridworld:
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def show(self):
        print(self.w)

    def get_neighbours(self, cell):
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)

    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neighbours(current):
            # Skip neighbours that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            # Skip if an equal cell with a lower score is already queued.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
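# Note: the heuristic above is the squared Euclidean distance to the goal, which keeps
# the comparison cheap; e.g. from (0, 0) to a goal at (4, 4) it evaluates to
# 4**2 + 4**2 = 32.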
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f'path from {start.position} to {goal.position}')
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 53 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    # Sum all natural numbers below n that are multiples of 3 or 5.
    result = 0
    a = 3
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 83 | 0 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
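# How the quine works: the lambda receives the program's own format string and splices it
# back into itself with `%`; `%r` produces the quoted, escaped repr of that string and
# `%%` is a literal percent sign, so the printed program is itself a quine that
# reproduces itself exactly on every subsequent run.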
| 128 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 83 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)


class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop('padding_side', 'right')
        self.return_attention_mask = kwargs.pop('return_attention_mask', True)

        super().__init__(**kwargs)
    def pad(self, processed_features, padding=True, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None):
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
                f' to this method that includes {self.model_input_names[0]}, but you provided'
                f' {list(processed_features.keys())}')

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element):
                return_tensors = 'pt'
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = 'np'
            else:
                raise ValueError(
                    f'type of {first_element} unknown: {type(first_element)}. '
                    'Should be one of a python, numpy, pytorch or tensorflow object.')

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError('Some items in the output dictionary have a different batch size than others.')

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation)
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask)
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and 'attention_mask' not in processed_features:
            processed_features['attention_mask'] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == 'right':
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'], (0, difference))
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, 'constant', constant_values=self.padding_value)
            elif self.padding_side == 'left':
                if return_attention_mask:
                    processed_features['attention_mask'] = np.pad(
                        processed_features['attention_mask'], (difference, 0))
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, 'constant', constant_values=self.padding_value)
            else:
                raise ValueError('Invalid padding strategy:' + str(self.padding_side))

        return processed_features
    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.')

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if 'attention_mask' in processed_features:
                processed_features['attention_mask'] = processed_features['attention_mask'][:max_length]

        return processed_features
def A__ ( self ,A__=False ,A__=None):
# Get padding strategy
if padding is not False:
if padding is True:
lowercase = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__ ,lowerCamelCase__):
lowercase = PaddingStrategy(lowerCamelCase__)
elif isinstance(lowerCamelCase__ ,lowerCamelCase__):
lowercase = padding
else:
lowercase = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined')
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''')
return padding_strategy
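# A minimal standalone sketch of the right-padding rule used in `_pad` above,
# assuming feature_size == 1 and padding_value == 0.0; the names `pad_right`
# and `seq` are hypothetical and exist only for this example.
import numpy as np

def pad_right(seq: np.ndarray, max_length: int, padding_value: float = 0.0) -> np.ndarray:
    difference = max_length - len(seq)
    return np.pad(seq, (0, difference), "constant", constant_values=padding_value)

seq = np.array([0.1, 0.2, 0.3])
padded = pad_right(seq, max_length=8)
attention_mask = np.pad(np.ones(len(seq), dtype=np.int32), (0, 8 - len(seq)))
assert padded.shape == (8,) and attention_mask.sum() == 3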
| 101 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
snake_case_ : Any = logging.getLogger(__name__)
@dataclass
class lowercase__ :
lowercase__ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
lowercase__ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class lowercase__ :
lowercase__ = field(default=lowercase , metadata={"""help""": """The input training data file (a text file)."""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
if self.train_file is not None:
_UpperCamelCase : List[Any] = self.train_file.split('.' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_UpperCamelCase : Union[str, Any] = self.validation_file.split('.' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowercase__ :
lowercase__ = 42
lowercase__ = True
lowercase__ = None
lowercase__ = None
def __call__( self : Optional[Any] ,lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : List[str] = 'label' if 'label' in features[0].keys() else 'labels'
_UpperCamelCase : List[Any] = [feature.pop(lowerCamelCase__ ) for feature in features]
_UpperCamelCase : Dict = len(lowerCamelCase__ )
_UpperCamelCase : List[str] = len(features[0]['input_ids'] )
_UpperCamelCase : List[Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(lowerCamelCase__ )] for feature in features
]
_UpperCamelCase : str = list(chain(*lowerCamelCase__ ) )
_UpperCamelCase : Tuple = self.tokenizer.pad(
lowerCamelCase__ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='pt' ,)
# Un-flatten
_UpperCamelCase : str = {k: v.view(lowerCamelCase__ ,lowerCamelCase__ ,-1 ) for k, v in batch.items()}
# Add back labels
_UpperCamelCase : Optional[int] = torch.tensor(lowerCamelCase__ ,dtype=torch.int64 )
return batch
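# A minimal sketch of the flatten/un-flatten trick in `__call__` above: the four
# choices of each example are tokenized as one flat batch, then reshaped back to
# (batch, num_choices, seq_len). All names below are hypothetical stand-ins.
import torch

batch_size, num_choices, seq_len = 2, 4, 5
flat_ids = torch.arange(batch_size * num_choices * seq_len)
batch = {"input_ids": flat_ids.view(batch_size * num_choices, seq_len)}
unflat = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
assert unflat["input_ids"].shape == (batch_size, num_choices, seq_len)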
def A__ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , UpperCAmelCase_ , UpperCAmelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCamelCase : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase_ )
datasets.utils.logging.set_verbosity(UpperCAmelCase_ )
transformers.utils.logging.set_verbosity(UpperCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_UpperCamelCase : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_UpperCamelCase : Optional[int] = {}
if data_args.train_file is not None:
_UpperCamelCase : Tuple = data_args.train_file
if data_args.validation_file is not None:
_UpperCamelCase : Tuple = data_args.validation_file
_UpperCamelCase : Any = data_args.train_file.split('.' )[-1]
_UpperCamelCase : Union[str, Any] = load_dataset(
UpperCAmelCase_ , data_files=UpperCAmelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_UpperCamelCase : List[str] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCamelCase : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCamelCase : Dict = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_UpperCamelCase : Any = [f'ending{i}' for i in range(4 )]
_UpperCamelCase : int = 'sent1'
_UpperCamelCase : List[str] = 'sent2'
if data_args.max_seq_length is None:
_UpperCamelCase : int = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
_UpperCamelCase : int = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
_UpperCamelCase : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCAmelCase_ ):
_UpperCamelCase : str = [[context] * 4 for context in examples[context_name]]
_UpperCamelCase : Optional[Any] = examples[question_header_name]
_UpperCamelCase : Tuple = [
[f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(UpperCAmelCase_ )
]
# Flatten out
_UpperCamelCase : Optional[int] = list(chain(*UpperCAmelCase_ ) )
_UpperCamelCase : Optional[Any] = list(chain(*UpperCAmelCase_ ) )
# Tokenize
_UpperCamelCase : Tuple = tokenizer(
UpperCAmelCase_ , UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCAmelCase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_UpperCamelCase : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
_UpperCamelCase : Tuple = min(len(UpperCAmelCase_ ) , data_args.max_train_samples )
_UpperCamelCase : Tuple = train_dataset.select(range(UpperCAmelCase_ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_UpperCamelCase : Union[str, Any] = train_dataset.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_UpperCamelCase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_UpperCamelCase : Union[str, Any] = min(len(UpperCAmelCase_ ) , data_args.max_eval_samples )
_UpperCamelCase : str = eval_dataset.select(range(UpperCAmelCase_ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_UpperCamelCase : Dict = eval_dataset.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_UpperCamelCase : List[Any] = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCAmelCase_ , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(UpperCAmelCase_ ):
_UpperCamelCase , _UpperCamelCase = eval_predictions
_UpperCamelCase : List[str] = np.argmax(UpperCAmelCase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.float32 ).mean().item()}
# Initialize our Trainer
_UpperCamelCase : Optional[int] = Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , compute_metrics=UpperCAmelCase_ , )
# Training
if training_args.do_train:
_UpperCamelCase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_UpperCamelCase : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCamelCase : int = last_checkpoint
_UpperCamelCase : List[str] = trainer.train(resume_from_checkpoint=UpperCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCamelCase : Union[str, Any] = train_result.metrics
_UpperCamelCase : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase_ )
)
_UpperCamelCase : Optional[Any] = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
trainer.log_metrics('train' , UpperCAmelCase_ )
trainer.save_metrics('train' , UpperCAmelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCamelCase : List[Any] = trainer.evaluate()
_UpperCamelCase : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase_ )
_UpperCamelCase : int = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
trainer.log_metrics('eval' , UpperCAmelCase_ )
trainer.save_metrics('eval' , UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase_ )
else:
trainer.create_model_card(**UpperCAmelCase_ )
def A__ ( UpperCAmelCase_ ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 83 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : Dict = '''visual_bert'''
def __init__( self, A=30_522, A=768, A=512, A=12, A=12, A=3_072, A="gelu", A=0.1, A=0.1, A=512, A=2, A=0.02, A=1E-12, A=False, A=True, A=1, A=0, A=2, **A, ):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, **lowerCamelCase__ )
SCREAMING_SNAKE_CASE : Tuple = vocab_size
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Union[str, Any] = visual_embedding_dim
SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : str = num_attention_heads
SCREAMING_SNAKE_CASE : Any = intermediate_size
SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE : int = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Any = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
SCREAMING_SNAKE_CASE : List[Any] = bypass_transformer
SCREAMING_SNAKE_CASE : Optional[Any] = special_visual_initialize
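# A usage sketch, assuming the class above mirrors the public VisualBertConfig
# in transformers (same defaults as the __init__ signature above).
from transformers import VisualBertConfig

config = VisualBertConfig(vocab_size=30_522, hidden_size=768, visual_embedding_dim=512)
assert config.model_type == "visual_bert"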
| 251 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class lowercase__ :
lowercase__ = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
lowercase__ = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
lowercase__ = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def A__ ( ):
_UpperCamelCase : Optional[Any] = HfArgumentParser((ModelArguments,) )
(_UpperCamelCase ,) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
_UpperCamelCase : Any = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
_UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
_UpperCamelCase : str = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
_UpperCamelCase : str = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : str = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=UpperCAmelCase_ , decoder_config=UpperCAmelCase_ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
_UpperCamelCase : str = decoder_config.decoder_start_token_id
_UpperCamelCase : Optional[int] = decoder_config.pad_token_id
if decoder_start_token_id is None:
_UpperCamelCase : int = decoder_config.bos_token_id
if pad_token_id is None:
_UpperCamelCase : Dict = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
_UpperCamelCase : List[Any] = decoder_config.eos_token_id
_UpperCamelCase : Dict = decoder_start_token_id
_UpperCamelCase : int = pad_token_id
_UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
_UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
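# A minimal sketch of the core call in the script above; the checkpoint names
# are illustrative and running this downloads weights.
from transformers import FlaxVisionEncoderDecoderModel

model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k", "gpt2"
)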
| 83 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def lowercase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase : Dict = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowerCamelCase__ )
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : str = self.dummy_uncond_unet
lowerCAmelCase : Optional[int] = DDIMScheduler()
lowerCAmelCase : str = self.dummy_vq_model
lowerCAmelCase : Any = LDMPipeline(unet=lowerCamelCase__ , vqvae=lowerCamelCase__ , scheduler=lowerCamelCase__ )
ldm.to(lowerCamelCase__ )
ldm.set_progress_bar_config(disable=lowerCamelCase__ )
lowerCAmelCase : int = torch.manual_seed(0 )
lowerCAmelCase : List[str] = ldm(generator=lowerCamelCase__ , num_inference_steps=2 , output_type="numpy" ).images
lowerCAmelCase : Any = torch.manual_seed(0 )
lowerCAmelCase : Any = ldm(generator=lowerCamelCase__ , num_inference_steps=2 , output_type="numpy" , return_dict=lowerCamelCase__ )[0]
lowerCAmelCase : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase : int = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
lowerCAmelCase : Union[str, Any] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : List[str] = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(lowerCamelCase__ )
ldm.set_progress_bar_config(disable=lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase : Optional[int] = ldm(generator=lowerCamelCase__ , num_inference_steps=5 , output_type="numpy" ).images
lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowerCAmelCase : Any = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
lowerCAmelCase : List[Any] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
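# A sketch of the seeding pattern in the tests above: torch.manual_seed(0) returns
# the default torch.Generator, which the pipeline accepts via `generator=` for
# reproducible sampling. Downloads the checkpoint when run.
import torch
from diffusers import LDMPipeline

ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
image = ldm(generator=torch.manual_seed(0), num_inference_steps=5, output_type="numpy").images[0]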
| 108 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case_ : Dict = logging.get_logger(__name__)
class lowercase__ ( lowercase ):
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : float ,**lowerCamelCase__ : int ):
'''simple docstring'''
_UpperCamelCase : List[Any] = feature_size
_UpperCamelCase : Any = sampling_rate
_UpperCamelCase : Optional[Any] = padding_value
_UpperCamelCase : Union[str, Any] = kwargs.pop('padding_side' ,'right' )
_UpperCamelCase : Dict = kwargs.pop('return_attention_mask' ,lowerCamelCase__ )
super().__init__(**lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] ,lowerCamelCase__ : Union[bool, str, PaddingStrategy] = True ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,):
'''simple docstring'''
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase__ ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
_UpperCamelCase : int = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
_UpperCamelCase : List[Any] = processed_features[self.model_input_names[0]]
_UpperCamelCase : Dict = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase__ ) == 0:
if return_attention_mask:
_UpperCamelCase : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_UpperCamelCase : List[str] = required_input[0]
if isinstance(lowerCamelCase__ ,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
_UpperCamelCase : List[str] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase__ ):
_UpperCamelCase : Dict = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase__ ):
_UpperCamelCase : Any = 'tf'
elif is_torch_tensor(lowerCamelCase__ ):
_UpperCamelCase : Optional[int] = 'pt'
elif isinstance(lowerCamelCase__ ,(int, float, list, tuple, np.ndarray) ):
_UpperCamelCase : int = 'np'
else:
raise ValueError(
F'type of {first_element} unknown: {type(lowerCamelCase__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
_UpperCamelCase : Any = to_numpy(lowerCamelCase__ )
else:
_UpperCamelCase : Any = [to_numpy(lowerCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
_UpperCamelCase : Optional[int] = self._get_padding_strategies(padding=lowerCamelCase__ ,max_length=lowerCamelCase__ )
_UpperCamelCase : str = processed_features[self.model_input_names[0]]
_UpperCamelCase : List[str] = len(lowerCamelCase__ )
if not all(len(lowerCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
_UpperCamelCase : List[str] = []
for i in range(lowerCamelCase__ ):
_UpperCamelCase : List[str] = {k: v[i] for k, v in processed_features.items()}
# truncation
_UpperCamelCase : List[str] = self._truncate(
lowerCamelCase__ ,max_length=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,truncation=lowerCamelCase__ ,)
truncated_inputs.append(lowerCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_UpperCamelCase : Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_UpperCamelCase : Any = PaddingStrategy.MAX_LENGTH
_UpperCamelCase : Optional[Any] = {}
for i in range(lowerCamelCase__ ):
# padding
_UpperCamelCase : Any = self._pad(
truncated_inputs[i] ,max_length=lowerCamelCase__ ,padding_strategy=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,)
for key, value in outputs.items():
if key not in batch_outputs:
_UpperCamelCase : Dict = []
if value.dtype is np.dtype(np.float64 ):
_UpperCamelCase : Any = value.astype(np.float32 )
batch_outputs[key].append(lowerCamelCase__ )
return BatchFeature(lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_UpperCamelCase : Optional[Any] = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_UpperCamelCase : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_UpperCamelCase : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_UpperCamelCase : Tuple = np.ones(len(lowerCamelCase__ ) ,dtype=np.int32 )
if needs_to_be_padded:
_UpperCamelCase : Dict = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
_UpperCamelCase : Optional[int] = np.pad(
processed_features['attention_mask'] ,(0, difference) )
_UpperCamelCase : Union[str, Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_UpperCamelCase : List[Any] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_UpperCamelCase : List[Any] = np.pad(
processed_features['attention_mask'] ,(difference, 0) )
_UpperCamelCase : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_UpperCamelCase : List[str] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
_UpperCamelCase : int = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_UpperCamelCase : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_UpperCamelCase : Optional[int] = len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
_UpperCamelCase : Dict = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_UpperCamelCase : Optional[Any] = processed_features['attention_mask'][:max_length]
return processed_features
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : int=False ,lowerCamelCase__ : Optional[Any]=None ):
'''simple docstring'''
# Get padding strategy
if padding is not False:
if padding is True:
_UpperCamelCase : Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Tuple = PaddingStrategy(lowerCamelCase__ )
elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = padding
else:
_UpperCamelCase : List[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
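# A quick arithmetic check of the `pad_to_multiple_of` rounding used twice above:
# max_length is bumped to the next multiple when it is not already one.
max_length, pad_to_multiple_of = 10, 8
if max_length % pad_to_multiple_of != 0:
    max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
assert max_length == 16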
| 83 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Tuple ) -> List[Any]:
lowerCAmelCase_ : Optional[int] = mock.Mock()
lowerCAmelCase_ : str = 5_00
lowerCAmelCase_ : str = {}
lowerCAmelCase_ : Tuple = HTTPError
lowerCAmelCase_ : Tuple = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase_ : List[str] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=lowerCamelCase__ ) as mock_head:
lowerCAmelCase_ : Optional[int] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __lowercase ( self : Optional[Any] ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = mock.Mock()
lowerCAmelCase_ : int = 5_00
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : Dict = HTTPError
lowerCAmelCase_ : int = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase_ : List[str] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=lowerCamelCase__ ) as mock_head:
lowerCAmelCase_ : List[str] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def __lowercase ( self : Optional[int] ) -> Union[str, Any]:
# This test is for deprecated behavior and can be removed in v5
try:
lowerCAmelCase_ : Optional[Any] = tempfile.mktemp()
with open(lowerCamelCase__ , """wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , lowerCamelCase__ )
lowerCAmelCase_ : str = AlbertTokenizer.from_pretrained(lowerCamelCase__ )
finally:
os.remove(lowerCamelCase__ )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" , """wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , lowerCamelCase__ )
lowerCAmelCase_ : str = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def __lowercase ( self : Optional[Any] ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class __snake_case ( unittest.TestCase):
"""simple docstring"""
lowercase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def __lowercase ( cls : Union[str, Any] ) -> Tuple:
lowerCAmelCase_ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase__ )
@classmethod
def __lowercase ( cls : str ) -> str:
try:
delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def __lowercase ( self : int ) -> Any:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Tuple = os.path.join(lowerCamelCase__ , """vocab.txt""" )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase_ : List[str] = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
lowerCAmelCase_ : List[Any] = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(lowerCamelCase__ , repo_id="""test-tokenizer""" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
lowerCAmelCase_ : int = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def __lowercase ( self : Any ) -> int:
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Tuple = os.path.join(lowerCamelCase__ , """vocab.txt""" )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase_ : List[str] = BertTokenizer(lowerCamelCase__ )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
lowerCAmelCase_ : str = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
lowerCamelCase__ , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=lowerCamelCase__ , use_auth_token=self._token )
lowerCAmelCase_ : Dict = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def __lowercase ( self : Union[str, Any] ) -> Optional[int]:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : str = os.path.join(lowerCamelCase__ , """vocab.txt""" )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase_ : List[Any] = CustomTokenizer(lowerCamelCase__ )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
lowerCAmelCase_ : Tuple = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase_ : Optional[int] = os.path.join(lowerCamelCase__ , """vocab.txt""" )
with open(lowerCamelCase__ , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
lowerCAmelCase_ : Union[str, Any] = BertTokenizerFast.from_pretrained(lowerCamelCase__ )
bert_tokenizer.save_pretrained(lowerCamelCase__ )
lowerCAmelCase_ : Optional[int] = CustomTokenizerFast.from_pretrained(lowerCamelCase__ )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
lowerCAmelCase_ : Union[str, Any] = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer' , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
lowerCAmelCase_ : Dict = AutoTokenizer.from_pretrained(
F'{USER}/test-dynamic-tokenizer' , use_fast=lowerCamelCase__ , trust_remote_code=lowerCamelCase__ )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Any ) -> Any:
lowerCAmelCase_ : Optional[Any] = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def __lowercase ( self : List[Any] ) -> Any:
lowerCAmelCase_ : Union[str, Any] = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] )
def __lowercase ( self : List[Any] ) -> Dict:
lowerCAmelCase_ : str = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] )
def __lowercase ( self : Tuple ) -> Tuple:
lowerCAmelCase_ : int = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def __lowercase ( self : Tuple ) -> Any:
lowerCAmelCase_ : Optional[int] = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def __lowercase ( self : Optional[Any] ) -> Optional[int]:
lowerCAmelCase_ : List[Any] = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] )
def __lowercase ( self : int ) -> int:
lowerCAmelCase_ : Any = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] )
def __lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowerCAmelCase_ : Dict = Trie()
lowerCAmelCase_ : Optional[Any] = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(lowerCamelCase__ , ["""AB""", """C"""] )
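# A standalone sketch of the Trie splitting behavior exercised above: added
# strings act as breakpoints and the longest match wins.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_100")
print(trie.split("[CLS] This is a extra_id_100"))  # ['[CLS]', ' This is a ', 'extra_id_100']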
| 120 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class lowercase__ :
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : MutableSequence[float] ):
'''simple docstring'''
if len(lowerCamelCase__ ) != degree + 1:
raise ValueError(
'The number of coefficients should be equal to the degree + 1.' )
_UpperCamelCase : list[float] = list(lowerCamelCase__ )
_UpperCamelCase : Tuple = degree
def __add__( self : Optional[int] ,lowerCamelCase__ : Polynomial ):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_UpperCamelCase : str = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree ,lowerCamelCase__ )
else:
_UpperCamelCase : str = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree ,lowerCamelCase__ )
def __sub__( self : Dict ,lowerCamelCase__ : Polynomial ):
'''simple docstring'''
return self + polynomial_a * Polynomial(0 ,[-1] )
def __neg__( self : Dict ):
'''simple docstring'''
return Polynomial(self.degree ,[-c for c in self.coefficients] )
def __mul__( self : Union[str, Any] ,lowerCamelCase__ : Polynomial ):
'''simple docstring'''
_UpperCamelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree ,lowerCamelCase__ )
def UpperCamelCase_ ( self : Dict ,lowerCamelCase__ : int | float ):
'''simple docstring'''
_UpperCamelCase : int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Union[str, Any] ):
'''simple docstring'''
_UpperCamelCase : Dict = ''
for i in range(self.degree ,-1 ,-1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(lowerCamelCase__ )
return polynomial
def __repr__( self : List[str] ):
'''simple docstring'''
return self.__str__()
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : list[float] = [0] * self.degree
for i in range(self.degree ):
_UpperCamelCase : Optional[int] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 ,lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : int | float = 0 ):
'''simple docstring'''
_UpperCamelCase : list[float] = [0] * (self.degree + 2)
_UpperCamelCase : Any = constant
for i in range(self.degree + 1 ):
_UpperCamelCase : Optional[Any] = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 ,lowerCamelCase__ )
def __eq__( self : str ,lowerCamelCase__ : object ):
'''simple docstring'''
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : List[str] ,lowerCamelCase__ : object ):
'''simple docstring'''
return not self.__eq__(lowerCamelCase__ )
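# A cross-check of the class above against numpy's reference implementation;
# in both, coefficients are ordered from the constant term upward.
from numpy.polynomial import polynomial as P

coeffs = [1.0, 2.0, 3.0]                      # 1 + 2x + 3x^2
assert P.polyval(2.0, coeffs) == 17.0         # evaluation at x = 2
assert list(P.polyder(coeffs)) == [2.0, 6.0]  # derivative: 2 + 6x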
| 83 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class __snake_case ( _lowercase):
snake_case__ : int = "glpn"
def __init__( self : str , __lowerCAmelCase : Dict=3 , __lowerCAmelCase : Tuple=4 , __lowerCAmelCase : List[str]=[2, 2, 2, 2] , __lowerCAmelCase : int=[8, 4, 2, 1] , __lowerCAmelCase : List[Any]=[3_2, 6_4, 1_6_0, 2_5_6] , __lowerCAmelCase : Optional[Any]=[7, 3, 3, 3] , __lowerCAmelCase : Union[str, Any]=[4, 2, 2, 2] , __lowerCAmelCase : int=[1, 2, 5, 8] , __lowerCAmelCase : int=[4, 4, 4, 4] , __lowerCAmelCase : Union[str, Any]="gelu" , __lowerCAmelCase : List[str]=0.0 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Union[str, Any]=0.1 , __lowerCAmelCase : Optional[Any]=1E-6 , __lowerCAmelCase : Optional[Any]=6_4 , __lowerCAmelCase : Any=1_0 , __lowerCAmelCase : Tuple=-1 , **__lowerCAmelCase : str , ):
"""simple docstring"""
super().__init__(**lowerCamelCase__ )
_lowerCamelCase : Dict = num_channels
_lowerCamelCase : List[Any] = num_encoder_blocks
_lowerCamelCase : List[Any] = depths
_lowerCamelCase : Tuple = sr_ratios
_lowerCamelCase : List[str] = hidden_sizes
_lowerCamelCase : Dict = patch_sizes
_lowerCamelCase : List[Any] = strides
_lowerCamelCase : Any = mlp_ratios
_lowerCamelCase : Any = num_attention_heads
_lowerCamelCase : Union[str, Any] = hidden_act
_lowerCamelCase : Tuple = hidden_dropout_prob
_lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCamelCase : Tuple = initializer_range
_lowerCamelCase : Dict = drop_path_rate
_lowerCamelCase : Union[str, Any] = layer_norm_eps
_lowerCamelCase : str = decoder_hidden_size
_lowerCamelCase : int = max_depth
_lowerCamelCase : Dict = head_in_index
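# A usage sketch, assuming the class above mirrors the public GLPNConfig in
# transformers (defaults as in the __init__ signature above).
from transformers import GLPNConfig

config = GLPNConfig(num_channels=3, decoder_hidden_size=64)
assert config.model_type == "glpn"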
| 72 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowercase__ ( lowercase ):
@require_torch
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCamelCase : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_UpperCamelCase : Dict = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_UpperCamelCase : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_UpperCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase__ )
BertModel.from_pretrained(lowerCamelCase__ )
BertTokenizer.from_pretrained(lowerCamelCase__ )
pipeline(task='fill-mask' ,model=lowerCamelCase__ )
# baseline - just load from_pretrained with normal network
_UpperCamelCase : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_UpperCamelCase : Dict = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCamelCase : str = '1'
_UpperCamelCase : Union[str, Any] = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCamelCase : Any = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_UpperCamelCase : Any = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_UpperCamelCase : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_UpperCamelCase : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(lowerCamelCase__ )
BertModel.from_pretrained(lowerCamelCase__ )
BertTokenizer.from_pretrained(lowerCamelCase__ )
pipeline(task='fill-mask' ,model=lowerCamelCase__ )
# baseline - just load from_pretrained with normal network
_UpperCamelCase : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_UpperCamelCase : List[Any] = self.get_env()
_UpperCamelCase : Dict = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_UpperCamelCase : Optional[Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_UpperCamelCase : str = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_UpperCamelCase : Any = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_UpperCamelCase : Optional[int] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_UpperCamelCase : Optional[Any] = self.get_env()
_UpperCamelCase : int = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_UpperCamelCase : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCamelCase : Dict = '1'
_UpperCamelCase : Dict = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : int = '\nfrom transformers import pipeline\n '
_UpperCamelCase : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_UpperCamelCase : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_UpperCamelCase : Union[str, Any] = self.get_env()
_UpperCamelCase : List[Any] = '1'
_UpperCamelCase : Tuple = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_UpperCamelCase : int = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = '\nfrom transformers import AutoModel\n '
_UpperCamelCase : int = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_UpperCamelCase : Any = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_UpperCamelCase : Optional[Any] = self.get_env()
_UpperCamelCase : Optional[int] = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCamelCase : List[Any] = '1'
_UpperCamelCase : Dict = subprocess.run(lowerCamelCase__ ,env=lowerCamelCase__ ,check=lowerCamelCase__ ,capture_output=lowerCamelCase__ )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
| 83 | 0 |
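The offline-mode tests above all rely on the same trick: import `transformers` first, then monkey-patch `socket.socket` so any real connection attempt raises, and lean on `TRANSFORMERS_OFFLINE=1` plus the local cache for file resolution. A minimal standalone sketch of that trick (it assumes the tiny checkpoint is already in the local cache):

import socket
from transformers import BertConfig  # import before patching; the patch must not break module import

def offline_socket(*args, **kwargs):
    raise socket.error('Faking flaky internet')

socket.socket = offline_socket  # from here on, any network connection attempt raises

# run with TRANSFORMERS_OFFLINE=1 so from_pretrained resolves from the local cache
BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
print('success')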
from __future__ import annotations
import math
def minimax(depth: int , node_index: int , is_max: bool , scores: list[int] , height: float) -> int:
    '''simple docstring'''
    if depth < 0:
        raise ValueError('Depth cannot be less than 0' )
    if len(scores ) == 0:
        raise ValueError('Scores cannot be empty' )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
def main() -> None:
    '''simple docstring'''
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores ) , 2 )
    print('Optimal value : ' , end='' )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 199 |
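A quick sanity check of the minimax routine above, assuming the cleaned-up signature `minimax(depth, node_index, is_max, scores, height)`: with the eight leaves used in `main()`, the root maximiser can guarantee 65.

import math

scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
height = math.log(len(scores), 2)  # 3 levels above 8 leaves

# level-2 maxima: 90, 33, 65, 34423; level-1 minima: 33, 65; root maximum: 65
assert minimax(0, 0, True, scores, height) == 65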
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class lowercase__ ( unittest.TestCase ):
def __init__( self : List[str] ,lowerCamelCase__ : List[str] ,lowerCamelCase__ : List[str]=13 ,lowerCamelCase__ : Dict=7 ,lowerCamelCase__ : Union[str, Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : List[Any]=True ,lowerCamelCase__ : Any=True ,lowerCamelCase__ : Dict=99 ,lowerCamelCase__ : int=32 ,lowerCamelCase__ : Tuple=5 ,lowerCamelCase__ : Dict=4 ,lowerCamelCase__ : Any=37 ,lowerCamelCase__ : str="gelu" ,lowerCamelCase__ : List[Any]=0.1 ,lowerCamelCase__ : Optional[Any]=0.1 ,lowerCamelCase__ : Optional[Any]=512 ,lowerCamelCase__ : Any=16 ,lowerCamelCase__ : Tuple=2 ,lowerCamelCase__ : int=0.0_2 ,lowerCamelCase__ : int=4 ,):
'''simple docstring'''
_UpperCamelCase : List[Any] = parent
_UpperCamelCase : Dict = batch_size
_UpperCamelCase : Union[str, Any] = seq_length
_UpperCamelCase : Optional[Any] = is_training
_UpperCamelCase : Optional[int] = use_attention_mask
_UpperCamelCase : Any = use_token_type_ids
_UpperCamelCase : str = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[Any] = hidden_size
_UpperCamelCase : Dict = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : str = intermediate_size
_UpperCamelCase : int = hidden_act
_UpperCamelCase : Any = hidden_dropout_prob
_UpperCamelCase : Any = attention_probs_dropout_prob
_UpperCamelCase : List[str] = max_position_embeddings
_UpperCamelCase : Optional[int] = type_vocab_size
_UpperCamelCase : str = type_sequence_label_size
_UpperCamelCase : Dict = initializer_range
_UpperCamelCase : List[Any] = num_choices
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
_UpperCamelCase : Union[str, Any] = None
if self.use_attention_mask:
_UpperCamelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCamelCase : Any = DistilBertConfig(
vocab_size=self.vocab_size ,dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,hidden_dim=self.intermediate_size ,hidden_act=self.hidden_act ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,tie_weights_=lowerCamelCase__ ,)
return config, input_ids, attention_mask
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
config, input_ids, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[str] = FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase : Dict = model_class_name.from_pretrained('distilbert-base-uncased' )
_UpperCamelCase : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowercase__ ( unittest.TestCase ):
@slow
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
_UpperCamelCase : List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_UpperCamelCase : Tuple = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase : Dict = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ )[0]
_UpperCamelCase : Any = (1, 11, 768)
self.assertEqual(output.shape ,lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] ,lowerCamelCase__ ,atol=1E-4 ) )
| 83 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def snake_case ( self , _snake_case=0 ):
"""simple docstring"""
_lowerCAmelCase = np.random.RandomState(lowerCamelCase__ )
_lowerCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**lowerCamelCase__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**lowerCamelCase__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**lowerCamelCase__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**lowerCamelCase__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**lowerCamelCase__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = pipe(**lowerCamelCase__ ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
_lowerCAmelCase = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = 3 * [inputs['prompt']]
# forward
_lowerCAmelCase = pipe(**lowerCamelCase__ )
_lowerCAmelCase = output.images[0, -3:, -3:, -1]
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = pipe.tokenizer(
lowerCamelCase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors="""np""" , )
_lowerCAmelCase = text_inputs['input_ids']
_lowerCAmelCase = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_lowerCAmelCase = prompt_embeds
# forward
_lowerCAmelCase = pipe(**lowerCamelCase__ )
_lowerCAmelCase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = 3 * ['this is a negative prompt']
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = 3 * [inputs['prompt']]
# forward
_lowerCAmelCase = pipe(**lowerCamelCase__ )
_lowerCAmelCase = output.images[0, -3:, -3:, -1]
_lowerCAmelCase = self.get_dummy_inputs()
_lowerCAmelCase = 3 * [inputs.pop("""prompt""" )]
_lowerCAmelCase = []
for p in [prompt, negative_prompt]:
_lowerCAmelCase = pipe.tokenizer(
lowerCamelCase__ , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=lowerCamelCase__ , return_tensors="""np""" , )
_lowerCAmelCase = text_inputs['input_ids']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_lowerCAmelCase = embeds
# forward
_lowerCAmelCase = pipe(**lowerCamelCase__ )
_lowerCAmelCase = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
@property
def snake_case ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = ort.SessionOptions()
_lowerCAmelCase = False
return options
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = 'A painting of a squirrel eating a burger'
np.random.seed(0 )
_lowerCAmelCase = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
_lowerCAmelCase = output.images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = 'open neural network exchange'
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type="""np""" )
_lowerCAmelCase = output.images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=lowerCamelCase__ , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = 'open neural network exchange'
_lowerCAmelCase = np.random.RandomState(0 )
_lowerCAmelCase = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type="""np""" )
_lowerCAmelCase = output.images
_lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def snake_case ( self ):
"""simple docstring"""
number_of_steps = 0
def test_callback_fn(step , timestep , latents) -> None:
test_callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase = latents[0, -3:, -3:, -1]
_lowerCAmelCase = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_lowerCAmelCase = latents[0, -3:, -3:, -1]
_lowerCAmelCase = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1e-3
test_callback_fn.has_been_called = False
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_lowerCAmelCase = 'Andromeda galaxy in a bottle'
_lowerCAmelCase = np.random.RandomState(0 )
pipe(
prompt=lowerCamelCase__ , num_inference_steps=5 , guidance_scale=7.5 , generator=lowerCamelCase__ , callback=lowerCamelCase__ , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert pipe.safety_checker is None
_lowerCAmelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCamelCase__ )
_lowerCAmelCase = OnnxStableDiffusionPipeline.from_pretrained(lowerCamelCase__ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_lowerCAmelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
| 82 |
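Every scheduler test above follows one swap pattern: build the pipeline once, then rebuild only the scheduler from the current scheduler's config, so the ONNX graph itself is untouched. A hedged sketch of that pattern (checkpoint name reused from the tests above; treat it as illustrative, not canonical usage):

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline',
    provider='CPUExecutionProvider',
)
# from_config copies the timestep/beta settings, so only the sampling algorithm changes
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
image = pipe('A painting of a squirrel eating a burger', num_inference_steps=2, output_type='np').images[0]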
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
snake_case_ : List[Any] = logging.get_logger(__name__)
class lowercase__ ( lowercase ):
lowercase__ = """AutoTokenizer"""
lowercase__ = ["""tokenizer"""]
lowercase__ = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
def __init__( self : List[str] ,lowerCamelCase__ : Tuple ,lowerCamelCase__ : Tuple=None ):
'''simple docstring'''
super().__init__(lowerCamelCase__ )
_UpperCamelCase : Dict = speaker_embeddings
@classmethod
def UpperCamelCase_ ( cls : Union[str, Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : str="speaker_embeddings_path.json" ,**lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
if speaker_embeddings_dict_path is not None:
_UpperCamelCase : Optional[Any] = get_file_from_repo(
lowerCamelCase__ ,lowerCamelCase__ ,subfolder=kwargs.pop('subfolder' ,lowerCamelCase__ ) ,cache_dir=kwargs.pop('cache_dir' ,lowerCamelCase__ ) ,force_download=kwargs.pop('force_download' ,lowerCamelCase__ ) ,proxies=kwargs.pop('proxies' ,lowerCamelCase__ ) ,resume_download=kwargs.pop('resume_download' ,lowerCamelCase__ ) ,local_files_only=kwargs.pop('local_files_only' ,lowerCamelCase__ ) ,use_auth_token=kwargs.pop('use_auth_token' ,lowerCamelCase__ ) ,revision=kwargs.pop('revision' ,lowerCamelCase__ ) ,)
if speaker_embeddings_path is None:
logger.warning(
F'`{os.path.join(lowerCamelCase__ ,lowerCamelCase__ )}` does not exist\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
_UpperCamelCase : Union[str, Any] = None
else:
with open(lowerCamelCase__ ) as speaker_embeddings_json:
_UpperCamelCase : Optional[int] = json.load(lowerCamelCase__ )
else:
_UpperCamelCase : Tuple = None
_UpperCamelCase : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase__ ,**lowerCamelCase__ )
return cls(tokenizer=lowerCamelCase__ ,speaker_embeddings=lowerCamelCase__ )
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Union[str, Any] ,lowerCamelCase__ : int="speaker_embeddings_path.json" ,lowerCamelCase__ : Dict="speaker_embeddings" ,lowerCamelCase__ : bool = False ,**lowerCamelCase__ : Tuple ,):
'''simple docstring'''
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowerCamelCase__ ,lowerCamelCase__ ,'v2' ) ,exist_ok=lowerCamelCase__ )
_UpperCamelCase : Tuple = {}
_UpperCamelCase : Optional[Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_UpperCamelCase : Any = self._load_voice_preset(lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] ,lowerCamelCase__ ,F'{prompt_key}_{key}' ) ,voice_preset[key] ,allow_pickle=lowerCamelCase__ ,)
_UpperCamelCase : List[str] = os.path.join(lowerCamelCase__ ,F'{prompt_key}_{key}.npy' )
_UpperCamelCase : str = tmp_dict
with open(os.path.join(lowerCamelCase__ ,lowerCamelCase__ ) ,'w' ) as fp:
json.dump(lowerCamelCase__ ,lowerCamelCase__ )
super().save_pretrained(lowerCamelCase__ ,lowerCamelCase__ ,**lowerCamelCase__ )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : str = None ,**lowerCamelCase__ : Dict ):
'''simple docstring'''
_UpperCamelCase : Tuple = self.speaker_embeddings[voice_preset]
_UpperCamelCase : Union[str, Any] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
_UpperCamelCase : Dict = get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' ,'/' ) ,voice_preset_paths[key] ,subfolder=kwargs.pop('subfolder' ,lowerCamelCase__ ) ,cache_dir=kwargs.pop('cache_dir' ,lowerCamelCase__ ) ,force_download=kwargs.pop('force_download' ,lowerCamelCase__ ) ,proxies=kwargs.pop('proxies' ,lowerCamelCase__ ) ,resume_download=kwargs.pop('resume_download' ,lowerCamelCase__ ) ,local_files_only=kwargs.pop('local_files_only' ,lowerCamelCase__ ) ,use_auth_token=kwargs.pop('use_auth_token' ,lowerCamelCase__ ) ,revision=kwargs.pop('revision' ,lowerCamelCase__ ) ,)
if path is None:
raise ValueError(
F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" ,"/" ) ,voice_preset_paths[key] )}` does not exist\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' )
_UpperCamelCase : List[str] = np.load(lowerCamelCase__ )
return voice_preset_dict
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Optional[dict] = None ):
'''simple docstring'''
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] ,np.ndarray ):
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self : Any ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Union[str, Any]=None ,lowerCamelCase__ : Any="pt" ,lowerCamelCase__ : Dict=256 ,lowerCamelCase__ : int=False ,lowerCamelCase__ : int=True ,lowerCamelCase__ : List[str]=False ,**lowerCamelCase__ : Union[str, Any] ,):
'''simple docstring'''
if voice_preset is not None and not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
if (
isinstance(lowerCamelCase__ ,lowerCamelCase__ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
_UpperCamelCase : Optional[int] = self._load_voice_preset(lowerCamelCase__ )
else:
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and not voice_preset.endswith('.npz' ):
_UpperCamelCase : Tuple = voice_preset + '.npz'
_UpperCamelCase : str = np.load(lowerCamelCase__ )
if voice_preset is not None:
self._validate_voice_preset_dict(lowerCamelCase__ ,**lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = BatchFeature(data=lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
_UpperCamelCase : Union[str, Any] = self.tokenizer(
lowerCamelCase__ ,return_tensors=lowerCamelCase__ ,padding='max_length' ,max_length=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,return_token_type_ids=lowerCamelCase__ ,add_special_tokens=lowerCamelCase__ ,**lowerCamelCase__ ,)
if voice_preset is not None:
_UpperCamelCase : Optional[Any] = voice_preset
return encoded_text
| 83 | 0 |
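The processor above validates voice presets purely by key and dimensionality: per its `preset_shape` table, `semantic_prompt` must be a 1-D array while `coarse_prompt` and `fine_prompt` must be 2-D. A sketch of a preset that passes that validation (the array sizes are made-up illustration values, not Bark's real prompt lengths):

import numpy as np

voice_preset = {
    'semantic_prompt': np.zeros(100, dtype=np.int64),     # 1-D
    'coarse_prompt': np.zeros((2, 100), dtype=np.int64),  # 2-D
    'fine_prompt': np.zeros((8, 100), dtype=np.int64),    # 2-D
}
preset_shape = {'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2}
for key, ndim in preset_shape.items():
    assert len(voice_preset[key].shape) == ndim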
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( _lowercase , unittest.TestCase):
snake_case__ = CTRLTokenizer
snake_case__ = False
snake_case__ = False
def _UpperCamelCase ( self : str ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCamelCase = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
_UpperCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_UpperCamelCase = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
_UpperCamelCase = {'unk_token': '<unk>'}
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(lowerCamelCase__ ) )
def _UpperCamelCase ( self : List[str] , **__UpperCamelCase : Union[str, Any] ) -> Any:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str ) -> str:
_UpperCamelCase = 'adapt react readapt apt'
_UpperCamelCase = 'adapt react readapt apt'
return input_text, output_text
def _UpperCamelCase ( self : str ) -> Union[str, Any]:
_UpperCamelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCamelCase = 'adapt react readapt apt'
_UpperCamelCase = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
_UpperCamelCase = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_UpperCamelCase = tokens + [tokenizer.unk_token]
_UpperCamelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
| 256 |
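The expected tokenization in the fixture above can be reproduced by hand from its five merge rules. Below is a simplified BPE loop (the real CTRLTokenizer also does caching, regex pre-tokenization, and unk mapping) that yields exactly `adapt re@@ a@@ c@@ t re@@ adapt apt`:

# hand-rolled BPE: repeatedly apply the lowest-ranked adjacent merge
merges = [('a', 'p'), ('ap', 't</w>'), ('r', 'e'), ('a', 'd'), ('ad', 'apt</w>')]
ranks = {pair: i for i, pair in enumerate(merges)}

def bpe(word):
    symbols = list(word[:-1]) + [word[-1] + '</w>']
    while True:
        pairs = [(ranks[p], i) for i, p in enumerate(zip(symbols, symbols[1:])) if p in ranks]
        if not pairs:
            return symbols
        _, i = min(pairs)
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2 :]

tokens = []
for word in 'adapt react readapt apt'.split():
    parts = bpe(word)
    tokens += [p + '@@' for p in parts[:-1]] + [parts[-1].replace('</w>', '')]
assert tokens == 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()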
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
snake_case_ : Tuple = random.Random()
def A__ ( UpperCAmelCase_ , UpperCAmelCase_=1.0 , UpperCAmelCase_=None , UpperCAmelCase_=None ):
if rng is None:
_UpperCamelCase : Dict = global_rng
_UpperCamelCase : int = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class lowercase__ ( unittest.TestCase ):
def __init__( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=7 ,lowerCamelCase__ : str=400 ,lowerCamelCase__ : int=2000 ,lowerCamelCase__ : int=1 ,lowerCamelCase__ : List[str]=0.0 ,lowerCamelCase__ : Union[str, Any]=16000 ,lowerCamelCase__ : Tuple=True ,lowerCamelCase__ : Optional[int]=True ,):
'''simple docstring'''
_UpperCamelCase : Optional[int] = parent
_UpperCamelCase : Union[str, Any] = batch_size
_UpperCamelCase : List[str] = min_seq_length
_UpperCamelCase : Optional[int] = max_seq_length
_UpperCamelCase : Union[str, Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCamelCase : List[str] = feature_size
_UpperCamelCase : List[str] = padding_value
_UpperCamelCase : List[Any] = sampling_rate
_UpperCamelCase : Dict = return_attention_mask
_UpperCamelCase : Tuple = do_normalize
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : List[str]=False ,lowerCamelCase__ : Tuple=False ):
'''simple docstring'''
def _flatten(lowerCamelCase__ : Optional[Any] ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_UpperCamelCase : Optional[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
_UpperCamelCase : Any = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
_UpperCamelCase : int = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
class lowercase__ ( lowercase , unittest.TestCase ):
lowercase__ = WavaVecaFeatureExtractor
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : List[str] = WavaVecaFeatureExtractionTester(self )
def UpperCamelCase_ ( self : Union[str, Any] ,lowerCamelCase__ : List[str] ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(lowerCamelCase__ ,axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ,axis=0 ) - 1 ) < 1E-3 ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
# Tests that all call wrap to encode_plus and batch_encode_plus
_UpperCamelCase : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCamelCase : int = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Tuple = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCamelCase : Tuple = feat_extract(speech_inputs[0] ,return_tensors='np' ).input_values
_UpperCamelCase : Any = feat_extract(np_speech_inputs[0] ,return_tensors='np' ).input_values
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
# Test batched
_UpperCamelCase : Union[str, Any] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
_UpperCamelCase : Optional[int] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCamelCase : str = [floats_list((1, x) )[0] for x in (800, 800, 800)]
_UpperCamelCase : str = np.asarray(lowerCamelCase__ )
_UpperCamelCase : List[str] = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
_UpperCamelCase : int = feat_extract(lowerCamelCase__ ,return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ ,lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ ,lowerCamelCase__ ,atol=1E-3 ) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
_UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : str = ['longest', 'max_length', 'do_not_pad']
_UpperCamelCase : List[str] = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Union[str, Any] = feat_extract(lowerCamelCase__ ,padding=lowerCamelCase__ ,max_length=lowerCamelCase__ ,return_tensors='np' )
_UpperCamelCase : int = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : List[str] = range(800 ,1400 ,200 )
_UpperCamelCase : List[str] = [floats_list((1, x) )[0] for x in lengths]
_UpperCamelCase : Optional[Any] = ['longest', 'max_length', 'do_not_pad']
_UpperCamelCase : str = [None, 1600, None]
for max_length, padding in zip(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : List[str] = feat_extract(lowerCamelCase__ ,max_length=lowerCamelCase__ ,padding=lowerCamelCase__ )
_UpperCamelCase : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : List[Any] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Union[str, Any] = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=1000 ,padding='max_length' ,return_tensors='np' )
_UpperCamelCase : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : int = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=1000 ,padding='longest' ,return_tensors='np' )
_UpperCamelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
_UpperCamelCase : Optional[int] = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
_UpperCamelCase : Any = feat_extract(
lowerCamelCase__ ,truncation=lowerCamelCase__ ,max_length=2000 ,padding='longest' ,return_tensors='np' )
_UpperCamelCase : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
@require_torch
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
import torch
_UpperCamelCase : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCamelCase : Optional[int] = np.random.rand(100 ).astype(np.floataa )
_UpperCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCamelCase : Optional[int] = feature_extractor.pad([{'input_values': inputs}] ,return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
_UpperCamelCase : Tuple = feature_extractor.pad([{'input_values': inputs}] ,return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
# this test makes sure that models that are using
# group norm don't have their feature extractor return the
# attention_mask
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_UpperCamelCase : Optional[int] = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
_UpperCamelCase : Any = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask ,config.feat_extract_norm == 'layer' )
| 83 | 0 |
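The `_check_zero_mean_unit_variance` assertions above pin down what `do_normalize=True` does per sequence. A minimal sketch of that normalization on a raw waveform (this mirrors the per-sequence formula with a small epsilon; it ignores the attention-mask-aware batched path):

import numpy as np

def zero_mean_unit_var(x, eps=1e-7):
    # normalize one 1-D waveform to zero mean and unit variance
    return (x - x.mean()) / np.sqrt(x.var() + eps)

wave = np.random.rand(800).astype(np.float32)
normed = zero_mean_unit_var(wave)
assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3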
"""simple docstring"""
def catalan_numbers(upper_limit: int) -> "list[int]":
    """simple docstring"""
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0' )
    catalan_list = [0] * (upper_limit + 1)
    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1
    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2 , upper_limit + 1 ):
        for j in range(i ):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
    return catalan_list
if __name__ == "__main__":
    print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
    print('\n*** Enter -1 at any time to quit ***')
    print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
    try:
        while True:
            N = int(input().strip())
            if N < 0:
                print('\n********* Goodbye!! ************')
                break
            else:
                print(f'The Catalan numbers from 0 through {N} are:')
                print(catalan_numbers(N))
                print('Try another upper limit for the sequence: ', end='')
    except (NameError, ValueError):
        print('\n********* Invalid input, goodbye! ************\n')
    import doctest
    doctest.testmod()
| 167 |
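The DP above can be cross-checked against the closed form C(n) = binom(2n, n) / (n + 1), assuming the cleaned-up `catalan_numbers` defined above:

from math import comb

def catalan_closed_form(n):
    # C(n) = binomial(2n, n) // (n + 1); exact in integer arithmetic
    return comb(2 * n, n) // (n + 1)

assert catalan_numbers(10) == [catalan_closed_form(i) for i in range(11)]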
'''simple docstring'''
def solution(numerator: int = 1 , digit: int = 1_000) -> int:
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator , digit + 1 ):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1 , digit + 1 ):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided ):
                    longest_list_length = len(has_been_divided )
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide )
            now_divide = now_divide * 10 % divide_by_number
    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
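The O(digit²) membership scan above can be avoided by remembering where each long-division remainder first appeared: when a remainder repeats, the gap between its two occurrences is the period of 1/d. A hedged alternative sketch:

def recurring_cycle_length(d):
    seen = {}  # first position of each remainder
    remainder, position = 1, 0
    while remainder != 0 and remainder not in seen:
        seen[remainder] = position
        remainder = remainder * 10 % d
        position += 1
    return 0 if remainder == 0 else position - seen[remainder]

assert recurring_cycle_length(7) == 6  # 1/7 = 0.(142857)
assert recurring_cycle_length(2) == 0  # terminating decimal, no cycle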
'''simple docstring'''
def fibonacci(n: int) -> int:
    """simple docstring"""
    if n == 1 or not isinstance(n , int ):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2 , n + 1 ):
            sequence.append(sequence[i - 1] + sequence[i - 2] )
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    """simple docstring"""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index ) ) )
    return index
def solution(n: int = 1000) -> int:
    """simple docstring"""
    return fibonacci_digits_index(n )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 53 |
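Because F(n) ≈ φⁿ/√5, the digit count grows linearly: digits(F(n)) ≈ n·log10(φ) − log10(√5) + 1, so the index can be estimated in O(1) without building the sequence. A sketch of that estimate (it reproduces solution(1000) = 4782, the known Project Euler 25 answer):

import math

def fibonacci_index_estimate(n_digits):
    # smallest n with n*log10(phi) - log10(sqrt(5)) + 1 >= n_digits
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n_digits - 1 + math.log10(5) / 2) / math.log10(phi))

assert fibonacci_index_estimate(3) == 12       # F(12) = 144 is the first 3-digit term
assert fibonacci_index_estimate(1000) == 4782  # matches solution(1000) above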
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + num % 10
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83 | 0 |
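For contrast, the same predicate on the decimal string; the arithmetic version above avoids the string allocation, but both run in O(number of digits):

def is_palindrome_str(num):
    # negative numbers are never palindromic; compare digits to their reverse
    return num >= 0 and str(num) == str(num)[::-1]

assert is_palindrome_str(121) and not is_palindrome_str(-121) and not is_palindrome_str(10)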
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'x = 3'
UpperCamelCase_ = {}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {} , state=lowerCamelCase__ )
assert result == 3
self.assertDictEqual(lowerCamelCase__ , {"x": 3} )
UpperCamelCase_ = 'x = y'
UpperCamelCase_ = {'y': 5}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {} , state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase__ , {"x": 5, "y": 5} )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'y = add_two(x)'
UpperCamelCase_ = {'x': 3}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {"add_two": add_two} , state=lowerCamelCase__ )
assert result == 5
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
UpperCamelCase_ = evaluate(lowerCamelCase__ , {} , state=lowerCamelCase__ )
assert result is None
assert "tried to execute add_two" in out.out
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'x = 3'
UpperCamelCase_ = {}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {} , state=lowerCamelCase__ )
assert result == 3
self.assertDictEqual(lowerCamelCase__ , {"x": 3} )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
UpperCamelCase_ = {'x': 3}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {"add_two": add_two} , state=lowerCamelCase__ )
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "y": 5} )
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'x = 3\ny = 5'
UpperCamelCase_ = {}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {} , state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "y": 5} )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'text = f\'This is x: {x}.\''
UpperCamelCase_ = {'x': 3}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {} , state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "text": "This is x: 3."} )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'if x <= 3:\n y = 2\nelse:\n y = 5'
UpperCamelCase_ = {'x': 3}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {} , state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "y": 2} )
UpperCamelCase_ = {'x': 8}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {} , state=lowerCamelCase__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(lowerCamelCase__ , {"x": 8, "y": 5} )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'test_list = [x, add_two(x)]'
UpperCamelCase_ = {'x': 3}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {"add_two": add_two} , state=lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , [3, 5] )
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "test_list": [3, 5]} )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'y = x'
UpperCamelCase_ = {'x': 3}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {} , state=lowerCamelCase__ )
assert result == 3
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "y": 3} )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'test_list = [x, add_two(x)]\ntest_list[1]'
UpperCamelCase_ = {'x': 3}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {"add_two": add_two} , state=lowerCamelCase__ )
assert result == 5
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "test_list": [3, 5]} )
UpperCamelCase_ = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
UpperCamelCase_ = {'x': 3}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {"add_two": add_two} , state=lowerCamelCase__ )
assert result == 5
self.assertDictEqual(lowerCamelCase__ , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = 'x = 0\nfor i in range(3):\n x = i'
UpperCamelCase_ = {}
UpperCamelCase_ = evaluate(lowerCamelCase__ , {"range": range} , state=lowerCamelCase__ )
assert result == 2
self.assertDictEqual(lowerCamelCase__ , {"x": 2, "i": 2} )
| 128 |
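Outside the test harness, `evaluate` is driven exactly as the assertions above show: a source string, a whitelist of callable tools, and a state dict that doubles as the variable scope and is mutated in place. A short sketch grounded in those calls:

from transformers.tools.python_interpreter import evaluate

def add_two(x):
    return x + 2

state = {'x': 3}
result = evaluate('y = add_two(x)', {'add_two': add_two}, state=state)
assert result == 5 and state == {'x': 3, 'y': 5}

# a tool missing from the whitelist is refused rather than executed
assert evaluate('y = add_two(x)', {}, state={'x': 3}) is None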
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def sum_of_digits_compact(n: int) -> int:
    return sum(int(c ) for c in str(abs(n ) ) )
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}' , setup='import __main__' )
        print(f'{call:56} = {func(value )} -- {timing:.4f} seconds' )
    for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 83 | 0 |
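A related identity: iterating `sum_of_digits` down to a single digit (the digital root) has the closed form 1 + (n − 1) mod 9 for positive n, which makes a cheap cross-check:

def digital_root(n):
    # repeatedly apply sum_of_digits until a single digit remains
    n = abs(n)
    while n >= 10:
        n = sum_of_digits(n)
    return n

assert digital_root(262_144) == 1 + (262_144 - 1) % 9 == 1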
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
def __init__( self ,A__ ,A__=1_3 ,A__=7 ,A__=True ,A__=True ,A__=True ,A__=True ,A__=9_9 ,A__=1_6 ,A__=3_6 ,A__=6 ,A__=6 ,A__=6 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=5_1_2 ,A__=1_6 ,A__=2 ,A__=0.02 ,A__=3 ,A__=4 ,A__=None ,):
lowercase = parent
lowercase = batch_size
lowercase = seq_length
lowercase = is_training
lowercase = use_input_mask
lowercase = use_token_type_ids
lowercase = use_labels
lowercase = vocab_size
lowercase = embedding_size
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_hidden_groups
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = max_position_embeddings
lowercase = type_vocab_size
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = num_labels
lowercase = num_choices
lowercase = scope
def A__ ( self):
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
lowercase = None
if self.use_input_mask:
lowercase = random_attention_mask([self.batch_size, self.seq_length])
lowercase = None
if self.use_token_type_ids:
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
lowercase = None
lowercase = None
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
lowercase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels)
lowercase = ids_tensor([self.batch_size] ,self.num_choices)
lowercase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self):
return AlbertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,num_hidden_groups=self.num_hidden_groups ,)
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = AlbertModel(config=lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
lowercase = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__)
lowercase = model(lowerCamelCase__ ,token_type_ids=lowerCamelCase__)
lowercase = model(lowerCamelCase__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = AlbertForPreTraining(config=lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
lowercase = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,sentence_order_label=lowerCamelCase__ ,)
self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
self.parent.assertEqual(result.sop_logits.shape ,(self.batch_size, config.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = AlbertForMaskedLM(config=lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
lowercase = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = AlbertForQuestionAnswering(config=lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
lowercase = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,start_positions=lowerCamelCase__ ,end_positions=lowerCamelCase__ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_labels
lowercase = AlbertForSequenceClassification(lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
lowercase = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_labels
lowercase = AlbertForTokenClassification(config=lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
lowercase = model(lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels))
def A__ ( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__):
lowercase = self.num_choices
lowercase = AlbertForMultipleChoice(config=lowerCamelCase__)
model.to(lowerCamelCase__)
model.eval()
lowercase = input_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
lowercase = token_type_ids.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
lowercase = input_mask.unsqueeze(1).expand(-1 ,self.num_choices ,-1).contiguous()
lowercase = model(
lowerCamelCase__ ,attention_mask=lowerCamelCase__ ,token_type_ids=lowerCamelCase__ ,labels=lowerCamelCase__ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices))
def A__ ( self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : Dict =(
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowercase_ : Optional[int] =(
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase_ : Dict =True
def A__ ( self ,A__ ,A__ ,A__=False):
lowercase = super()._prepare_for_class(lowerCamelCase__ ,lowerCamelCase__ ,return_labels=lowerCamelCase__)
if return_labels:
if model_class in get_values(lowerCamelCase__):
lowercase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=lowerCamelCase__)
lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowerCamelCase__)
return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
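For readers reproducing these shape checks outside the test harness, here is a minimal, self-contained sketch of the assertion that create_and_check_for_sequence_classification encodes. The tiny config sizes are illustrative only; the real suite draws them from AlbertModelTester.

import torch
from transformers import AlbertConfig, AlbertForSequenceClassification

# Tiny illustrative config (not the values the test suite uses).
config = AlbertConfig(
    vocab_size=99, hidden_size=36, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, num_labels=3
)
model = AlbertForSequenceClassification(config).eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))  # batch_size=2, seq_length=7
with torch.no_grad():
    logits = model(input_ids).logits
assert logits.shape == (2, config.num_labels)  # same check as the tester method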
| 101 |
'''simple docstring'''
from math import pi
def arc_length(angle: float, radius: float) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
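A quick numerical sanity check, computed directly from the formula above:

from math import isclose, pi

# A 90° arc is a quarter of the full circumference 2·π·r.
assert isclose(arc_length(90, 10), 2 * pi * 10 / 4)
assert isclose(arc_length(360, 1), 2 * pi)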
| 83 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    """RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. """,
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
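The early-exit decision itself happens inside DeeBertModel's highway layers; a standalone sketch of the entropy test that drives it is shown below. The 0.5 threshold is illustrative, not the tuned per-task value.

import torch

def softmax_entropy(logits: torch.Tensor) -> torch.Tensor:
    # Shannon entropy of the softmax distribution, one value per example.
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs + 1e-12)).sum(dim=-1)

layer_logits = torch.tensor([[4.0, -2.0, -2.0]])  # a confident highway head
if softmax_entropy(layer_logits).item() < 0.5:    # illustrative threshold
    pass  # the real model raises HighwayException(outputs, exit_layer) here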
| 251 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
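Usage is the standard config round trip; note how attribute_map aliases the generic names onto the MVP-specific ones (a minimal sketch):

config = MvpConfig(d_model=512, encoder_layers=6, decoder_layers=6)
assert config.hidden_size == 512          # resolved through attribute_map onto d_model
assert config.num_attention_heads == 16   # aliases encoder_attention_heads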
| 83 | 0 |
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """
    Calculate the apparent power in a single-phase AC circuit from RMS magnitudes
    and phase angles given in degrees.
    """
    # Convert angles from degrees to radians
    voltage_angle_rad = math.radians(voltage_angle)
    current_angle_rad = math.radians(current_angle)

    # Convert voltage and current to rectangular form
    voltage_rect = cmath.rect(voltage, voltage_angle_rad)
    current_rect = cmath.rect(current, current_angle_rad)

    # Calculate apparent power
    return voltage_rect * current_rect


if __name__ == "__main__":
    import doctest

    doctest.testmod()
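Two worked cases: with both angles at zero the result is purely real, and a 90° voltage lead rotates it onto the imaginary axis.

import cmath

assert cmath.isclose(apparent_power(100, 5, 0, 0), 500 + 0j, abs_tol=1e-9)
assert cmath.isclose(apparent_power(100, 5, 90, 0), 500j, abs_tol=1e-9)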
| 108 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
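A possible invocation, assuming a 16 kHz mono waveform as a NumPy array; the tool lazily downloads and loads the openai/whisper-base checkpoint on first call, so this sketch is illustrative rather than a fast unit test.

import numpy as np

tool = SpeechToTextTool()
audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz (placeholder input)
print(tool(audio))                         # runs encode -> generate -> decode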
| 83 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    # NOTE: assumes a (config_class, tf_model_class, pt_model_class, ..., aws_config_map) entry;
    # some entries in MODEL_CLASSES above carry extra elements.
    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        # NOTE: assumes the entry also carries a checkpoint map alongside the config map;
        # entries of a different arity would need adjusting here.
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
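For a single checkpoint the script reduces to one call; the file paths below are placeholders, not values from this repository.

convert_pt_checkpoint_to_tf(
    model_type="bert",
    pytorch_checkpoint_path="/tmp/bert/pytorch_model.bin",  # placeholder path
    config_file="/tmp/bert/config.json",                    # placeholder path
    tf_dump_path="/tmp/bert/tf_model.h5",                   # placeholder path
    compare_with_pt_model=True,
)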
| 120 |
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
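Reading the dump back is symmetric; a minimal sketch, where the filename simply mirrors the dp_file pattern above for the default arguments:

import pickle

import numpy as np

with open("data/dump.bert-base-uncased.pickle", "rb") as handle:  # illustrative path
    sequences = pickle.load(handle)
print(len(sequences), "sequences;", np.asarray(sequences[0]).dtype)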
| 83 | 0 |
"""simple docstring"""
import os
def solution(filename: str = "matrix.txt") -> int:
    """
    Returns the minimal path sum from the top left to the bottom right of the grid,
    moving only right and down.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]
    dp[0][0] = grid[0][0]

    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(F"""{solution() = }""")
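The same recurrence on a small in-memory grid gives a quick check without the data file; the 3×3 values are illustrative.

grid = [[1, 3, 5], [2, 1, 9], [4, 2, 1]]
n = len(grid)
dp = [[0] * n for _ in range(n)]
dp[0][0] = grid[0][0]
for i in range(1, n):
    dp[0][i] = grid[0][i] + dp[0][i - 1]
    dp[i][0] = grid[i][0] + dp[i - 1][0]
for i in range(1, n):
    for j in range(1, n):
        dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
assert dp[-1][-1] == 7  # best path: 1 -> 2 -> 1 -> 2 -> 1 (down, right, down, right)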
| 72 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = '▁'
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
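Typical usage, which fetches albert-base-v2 from the Hub and exercises the pair-encoding methods above:

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoded = tokenizer("Hello world", "Second segment")
print(encoded["input_ids"])       # [CLS] ... [SEP] ... [SEP]
print(encoded["token_type_ids"])  # 0s for the first segment, 1s for the second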
| 83 | 0 |
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    # doctest body elided in this dump; kept as a placeholder
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
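A usage sketch with illustrative data, ranking items by value-to-weight ratio and filling a weight budget greedily:

foods = ["Burger", "Pizza", "Coca Cola", "Rice"]
values = [80, 100, 60, 70]
weights = [40, 38, 10, 70]
menu = build_menu(foods, values, weights)
chosen, total_value = greedy(menu, 60, Things.value_weight)
print(chosen, total_value)  # items fitting under max_cost=60, picked by descending ratio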
| 199 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def loader_batch_item(self):
'''simple docstring'''
if isinstance(self._loader_batch_data ,torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_UpperCamelCase : Union[str, Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCamelCase : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
# Convert ModelOutput to tuple first
_UpperCamelCase : str = element.to_tuple()
if isinstance(element[0] ,torch.Tensor ):
_UpperCamelCase : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_UpperCamelCase : str = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] ,torch.Tensor ):
_UpperCamelCase : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] ,np.ndarray ):
_UpperCamelCase : Tuple = tuple(np.expand_dims(el[self._loader_batch_index] ,0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCamelCase : Optional[int] = None
elif isinstance(element[self._loader_batch_index] ,torch.Tensor ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase : int = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] ,np.ndarray ):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase : Optional[Any] = np.expand_dims(element[self._loader_batch_index] ,0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCamelCase : Union[str, Any] = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCamelCase : Optional[int] = self._loader_batch_data.__class__(lowerCamelCase__ )
self._loader_batch_index += 1
return result
    def __next__(self):
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCamelCase : Tuple = next(self.iterator )
_UpperCamelCase : List[str] = self.infer(lowerCamelCase__ ,**self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCamelCase__ ,torch.Tensor ):
_UpperCamelCase : List[Any] = processed
else:
_UpperCamelCase : List[Any] = list(processed.keys() )[0]
_UpperCamelCase : Optional[int] = processed[key]
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : int = len(lowerCamelCase__ )
else:
_UpperCamelCase : List[str] = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase : int = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCamelCase : Dict = processed
_UpperCamelCase : str = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self
    def __next__(self):
'''simple docstring'''
if self.subiterator is None:
_UpperCamelCase : Tuple = self.infer(next(self.iterator ) ,**self.params )
try:
# Try to return next item
_UpperCamelCase : Optional[Any] = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start lookig at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_UpperCamelCase : List[Any] = self.infer(next(self.iterator ) ,**self.params )
_UpperCamelCase : int = next(self.subiterator )
return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self
    def __next__(self):
'''simple docstring'''
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# its a `is_last` and then just passes it on to the caller.
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase : Dict = self.loader_batch_item()
_UpperCamelCase : List[str] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
if is_last:
return accumulator
while not is_last:
_UpperCamelCase : List[Any] = self.infer(next(self.iterator ) ,**self.params )
if self.loader_batch_size is not None:
if isinstance(lowerCamelCase__ ,torch.Tensor ):
_UpperCamelCase : str = processed
else:
_UpperCamelCase : Any = list(processed.keys() )[0]
_UpperCamelCase : Tuple = processed[key]
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
_UpperCamelCase : Dict = len(lowerCamelCase__ )
else:
_UpperCamelCase : Tuple = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase : Any = observed_batch_size
_UpperCamelCase : List[Any] = processed
_UpperCamelCase : int = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase : List[Any] = self.loader_batch_item()
_UpperCamelCase : Optional[Any] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
if is_last:
return accumulator
else:
_UpperCamelCase : Any = processed
_UpperCamelCase : List[Any] = item.pop('is_last' )
accumulator.append(lowerCamelCase__ )
return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 83 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
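The effect of the _LazyModule indirection is that heavy backends load only when first touched; a sketch of what a consumer sees:

import transformers.models.speech_encoder_decoder as sed

# Importing the package above is cheap; the torch-backed class is materialized
# only when the attribute is first accessed.
model_cls = sed.SpeechEncoderDecoderModel  # triggers the real import here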
| 82 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 83 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
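A quick correctness check: the straight line y = 4x/3 from x = 0 to x = 3 has length 5 (a 3-4-5 triangle), and the piecewise-linear approximation is exact for a line regardless of step count:

assert math.isclose(line_length(lambda x: 4 * x / 3, 0, 3, 10), 5.0, rel_tol=1e-9)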
| 256 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 83 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
"""simple docstring"""
A_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ : Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ : int = self.get_dummy_inputs()
A_ : Tuple = pipe(**lowerCamelCase__ ).images
A_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A_ : Optional[Any] = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep(self):
"""simple docstring"""
A_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ : Tuple = self.get_dummy_inputs()
A_ : Union[str, Any] = pipe(**lowerCamelCase__ ).images
A_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A_ : Dict = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler(self):
"""simple docstring"""
A_ : Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ : Tuple = self.get_dummy_inputs()
A_ : Optional[int] = pipe(**lowerCamelCase__ ).images
A_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A_ : Union[str, Any] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral(self):
"""simple docstring"""
A_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A_ : Tuple = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ : Dict = self.get_dummy_inputs()
A_ : int = pipe(**lowerCamelCase__ ).images
A_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A_ : List[str] = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
@property
    def gpu_provider(self):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
return options
    def test_inference_default_pndm(self):
"""simple docstring"""
A_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A_ : Union[str, Any] = init_image.resize((1_28, 1_28) )
# using the PNDM scheduler by default
A_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ : Dict = 'A fantasy landscape, trending on artstation'
A_ : Optional[int] = torch.manual_seed(0 )
A_ : Optional[Any] = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=10 , generator=lowerCamelCase__ , output_type='''np''' , )
A_ : Union[str, Any] = output.images
A_ : Any = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
A_ : List[Any] = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms(self):
"""simple docstring"""
A_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
A_ : List[Any] = init_image.resize((1_28, 1_28) )
A_ : Optional[Any] = LMSDiscreteScheduler.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , subfolder='''scheduler''' )
A_ : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'''ssube/stable-diffusion-x4-upscaler-onnx''' , scheduler=lowerCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
A_ : Optional[int] = 'A fantasy landscape, trending on artstation'
A_ : List[str] = torch.manual_seed(0 )
A_ : Union[str, Any] = pipe(
prompt=lowerCamelCase__ , image=lowerCamelCase__ , guidance_scale=7.5 , num_inference_steps=20 , generator=lowerCamelCase__ , output_type='''np''' , )
A_ : List[Any] = output.images
A_ : List[str] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
A_ : List[str] = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
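All three checks above exercise the same diffusers pattern: rebuild a replacement scheduler from the current scheduler's config, so only the sampling algorithm changes while the timestep and beta settings carry over. A minimal sketch of that pattern, assuming `diffusers` and `onnxruntime` are installed (the checkpoint id is the one the nightly tests use):

from diffusers import DPMSolverMultistepScheduler, OnnxStableDiffusionUpscalePipeline

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
# Swap the sampler without touching the rest of the pipeline.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)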
| 167 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Optional[Any] = logging.get_logger(__name__)
snake_case_ : int = {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }
    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers
    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            'This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and'
            ' `num_decoder_layers`.' )
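A short usage sketch for the config class above (not part of the library file): `num_hidden_layers` is derived from the encoder and decoder depths, which is why its setter deliberately raises.

config = XLMProphetNetConfig(num_encoder_layers=12, num_decoder_layers=12)
assert config.num_hidden_layers == 24  # 12 encoder + 12 decoder layers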
| 83 | 0 |
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """simple docstring"""
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """simple docstring"""
    if "visual_encoder" in key:
        key = re.sub(r"visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
"""simple docstring"""
if config_path is not None:
__UpperCamelCase = BlipConfig.from_pretrained(UpperCAmelCase_ )
else:
__UpperCamelCase = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
__UpperCamelCase = BlipForConditionalGeneration(UpperCAmelCase_ ).eval()
__UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
__UpperCamelCase = blip_decoder(pretrained=UpperCAmelCase_ , image_size=384 , vit='base' )
__UpperCamelCase = pt_model.eval()
__UpperCamelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
__UpperCamelCase = modified_state_dict.pop(UpperCAmelCase_ )
__UpperCamelCase = rename_key(UpperCAmelCase_ )
__UpperCamelCase = value
hf_model.load_state_dict(UpperCAmelCase_ )
__UpperCamelCase = 384
__UpperCamelCase = load_demo_image(image_size=UpperCAmelCase_ , device='cpu' )
__UpperCamelCase = BertTokenizer.from_pretrained('bert-base-uncased' )
__UpperCamelCase = tokenizer(['a picture of'] ).input_ids
__UpperCamelCase = hf_model.generate(UpperCAmelCase_ , UpperCAmelCase_ )
assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
__UpperCamelCase = hf_model.generate(UpperCAmelCase_ )
assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(UpperCAmelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
__UpperCamelCase = (
'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
)
__UpperCamelCase = blip_vqa(pretrained=UpperCAmelCase_ , image_size=UpperCAmelCase_ , vit='base' )
vqa_model.eval()
__UpperCamelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
__UpperCamelCase = modified_state_dict.pop(UpperCAmelCase_ )
__UpperCamelCase = rename_key(UpperCAmelCase_ )
__UpperCamelCase = value
__UpperCamelCase = BlipForQuestionAnswering(UpperCAmelCase_ )
hf_vqa_model.load_state_dict(UpperCAmelCase_ )
__UpperCamelCase = ['How many dogs are in this image?']
__UpperCamelCase = tokenizer(UpperCAmelCase_ , return_tensors='pt' ).input_ids
__UpperCamelCase = hf_vqa_model.generate(UpperCAmelCase_ , UpperCAmelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '_vqa' )
__UpperCamelCase = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
__UpperCamelCase = blip_itm(pretrained=UpperCAmelCase_ , image_size=UpperCAmelCase_ , vit='base' )
itm_model.eval()
__UpperCamelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
__UpperCamelCase = modified_state_dict.pop(UpperCAmelCase_ )
__UpperCamelCase = rename_key(UpperCAmelCase_ )
__UpperCamelCase = value
__UpperCamelCase = BlipForImageTextRetrieval(UpperCAmelCase_ )
__UpperCamelCase = ['A picture of a woman with a dog sitting in a beach']
__UpperCamelCase = tokenizer(
UpperCAmelCase_ , return_tensors='pt' , padding='max_length' , truncation=UpperCAmelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(UpperCAmelCase_ )
hf_itm_model.eval()
__UpperCamelCase = hf_itm_model(UpperCAmelCase_ , UpperCAmelCase_ , use_itm_head=UpperCAmelCase_ )
__UpperCamelCase = hf_itm_model(UpperCAmelCase_ , UpperCAmelCase_ , use_itm_head=UpperCAmelCase_ )
assert out[0].item() == 0.2_1_1_0_6_8_7_4_9_4_2_7_7_9_5_4
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_5_6_9_8_8_4_5_3_8_6_5_0_5_1_2_7
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '_itm' )
if __name__ == "__main__":
a__ : Optional[Any] =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
a__ : Union[str, Any] =parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
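An illustrative trace of `rename_key` from the script above; the sample key is made up, but walking it through the regex chain shows how a BLIP-style parameter name becomes an HF-style one.

print(rename_key("visual_encoder.blocks.0.attn.qkv.weight"))
# -> vision_model.encoder.layers.0.self_attn.qkv.weight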
| 53 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    # Project Euler #1: sum of all natural numbers below `n` divisible by 3 or 5.
    result = 0
    for a in range(n):
        if a % 3 == 0 or a % 5 == 0:
            result += a
    return result
if __name__ == "__main__":
print(F"""{solution() = }""")
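The same sum also has a constant-time closed form via inclusion-exclusion over arithmetic series; this sketch must agree with the loop above for any `n`.

def sum_of_multiples_below(k: int, n: int) -> int:
    # k + 2k + ... + mk, where m = floor((n - 1) / k)
    m = (n - 1) // k
    return k * m * (m + 1) // 2

def solution_closed_form(n: int = 1000) -> int:
    return sum_of_multiples_below(3, n) + sum_of_multiples_below(5, n) - sum_of_multiples_below(15, n)

assert solution_closed_form() == 233168  # the well-known Project Euler #1 answer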
| 83 | 0 |
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 1_00, 2_00]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    # Iterating coins in the outer loop counts each combination exactly once,
    # regardless of the order in which the coins are used.
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 7_3682
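A small hand-checkable case for the function above: 5 pence can be formed as 5, 2+2+1, 2+1+1+1, or 1+1+1+1+1.

assert solution(5) == 4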
| 128 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
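A minimal sketch of one of the imports above in use, assuming `transformers` and PyTorch are installed: `DataCollatorWithPadding` pads a list of tokenized examples to the longest sequence in the batch.

from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorWithPadding(tokenizer=tokenizer)
batch = collator([tokenizer("short"), tokenizer("a somewhat longer sentence")])
print(batch["input_ids"].shape)  # (2, length of the longer example)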
| 83 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def remove_ignore_keys_(state_dict):
    '''simple docstring'''
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        'decoder.output_projection.weight',
        '_float_tensor',
        'encoder.embed_positions._float_tensor',
        'decoder.embed_positions._float_tensor',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    '''simple docstring'''
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mamaaa_checkpoint_from_disk(checkpoint_path):
    '''simple docstring'''
    mam_aaa = torch.load(checkpoint_path, map_location='cpu')
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size, max_position_embeddings=1024, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
lowercase__ :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowercase__ :List[Any] = parser.parse_args()
lowercase__ :Union[str, Any] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path)
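A standalone sketch of the weight-tying trick behind `make_linear_from_emb`, assuming only PyTorch: the output projection reuses the token-embedding matrix, so logits are dot products against the embeddings. (Here the Linear is declared with matching in/out sizes up front; the script above instead rebinds `.data`, which replaces the weight tensor outright.)

import torch
from torch import nn

emb = nn.Embedding(10, 4)  # (vocab_size, emb_size)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # tie the parameters
hidden = torch.randn(2, emb_size)
print(lm_head(hidden).shape)  # torch.Size([2, 10]) -> one logit per vocabulary entry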
| 101 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
snake_case_ : Any = logging.getLogger(__name__)
@dataclass
class ModelArguments:
lowercase__ = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
lowercase__ = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class DataTrainingArguments:
lowercase__ = field(default=lowercase , metadata={"""help""": """The input training data file (a text file)."""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
    def __post_init__(self):
        '''simple docstring'''
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__(self, features):
        '''simple docstring'''
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))
        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels, dtype=torch.int64)
        return batch
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCamelCase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_swag' , UpperCAmelCase_ , UpperCAmelCase_ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_UpperCamelCase : Optional[Any] = training_args.get_process_log_level()
logger.setLevel(UpperCAmelCase_ )
datasets.utils.logging.set_verbosity(UpperCAmelCase_ )
transformers.utils.logging.set_verbosity(UpperCAmelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_UpperCamelCase : Union[str, Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCamelCase : List[str] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_UpperCamelCase : Optional[int] = {}
if data_args.train_file is not None:
_UpperCamelCase : Tuple = data_args.train_file
if data_args.validation_file is not None:
_UpperCamelCase : Tuple = data_args.validation_file
_UpperCamelCase : Any = data_args.train_file.split('.' )[-1]
_UpperCamelCase : Union[str, Any] = load_dataset(
UpperCAmelCase_ , data_files=UpperCAmelCase_ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_UpperCamelCase : List[str] = load_dataset(
'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCamelCase : int = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_UpperCamelCase : Dict = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_UpperCamelCase : Any = [f'ending{i}' for i in range(4 )]
_UpperCamelCase : int = 'sent1'
_UpperCamelCase : List[str] = 'sent2'
if data_args.max_seq_length is None:
_UpperCamelCase : int = tokenizer.model_max_length
if max_seq_length > 1_0_2_4:
logger.warning(
'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
' override this default with `--block_size xxx`.' )
_UpperCamelCase : int = 1_0_2_4
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
_UpperCamelCase : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCAmelCase_ ):
_UpperCamelCase : str = [[context] * 4 for context in examples[context_name]]
_UpperCamelCase : Optional[Any] = examples[question_header_name]
_UpperCamelCase : Tuple = [
[f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(UpperCAmelCase_ )
]
# Flatten out
_UpperCamelCase : Optional[int] = list(chain(*UpperCAmelCase_ ) )
_UpperCamelCase : Optional[Any] = list(chain(*UpperCAmelCase_ ) )
# Tokenize
_UpperCamelCase : Tuple = tokenizer(
UpperCAmelCase_ , UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding='max_length' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(UpperCAmelCase_ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('--do_train requires a train dataset' )
_UpperCamelCase : Optional[Any] = raw_datasets['train']
if data_args.max_train_samples is not None:
_UpperCamelCase : Tuple = min(len(UpperCAmelCase_ ) , data_args.max_train_samples )
_UpperCamelCase : Tuple = train_dataset.select(range(UpperCAmelCase_ ) )
with training_args.main_process_first(desc='train dataset map pre-processing' ):
_UpperCamelCase : Union[str, Any] = train_dataset.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('--do_eval requires a validation dataset' )
_UpperCamelCase : str = raw_datasets['validation']
if data_args.max_eval_samples is not None:
_UpperCamelCase : Union[str, Any] = min(len(UpperCAmelCase_ ) , data_args.max_eval_samples )
_UpperCamelCase : str = eval_dataset.select(range(UpperCAmelCase_ ) )
with training_args.main_process_first(desc='validation dataset map pre-processing' ):
_UpperCamelCase : Dict = eval_dataset.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_UpperCamelCase : List[Any] = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=UpperCAmelCase_ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(UpperCAmelCase_ ):
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = eval_predictions
_UpperCamelCase : List[str] = np.argmax(UpperCAmelCase_ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_UpperCamelCase : Optional[int] = Trainer(
model=UpperCAmelCase_ , args=UpperCAmelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCAmelCase_ , data_collator=UpperCAmelCase_ , compute_metrics=UpperCAmelCase_ , )
# Training
if training_args.do_train:
_UpperCamelCase : Optional[int] = None
if training_args.resume_from_checkpoint is not None:
_UpperCamelCase : str = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCamelCase : int = last_checkpoint
_UpperCamelCase : List[str] = trainer.train(resume_from_checkpoint=UpperCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCamelCase : Union[str, Any] = train_result.metrics
_UpperCamelCase : Optional[Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase_ )
)
_UpperCamelCase : Optional[Any] = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
trainer.log_metrics('train' , UpperCAmelCase_ )
trainer.save_metrics('train' , UpperCAmelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_UpperCamelCase : List[Any] = trainer.evaluate()
_UpperCamelCase : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase_ )
_UpperCamelCase : int = min(UpperCAmelCase_ , len(UpperCAmelCase_ ) )
trainer.log_metrics('eval' , UpperCAmelCase_ )
trainer.save_metrics('eval' , UpperCAmelCase_ )
_UpperCamelCase : Optional[int] = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'multiple-choice',
'dataset_tags': 'swag',
'dataset_args': 'regular',
'dataset': 'SWAG',
'language': 'en',
}
if training_args.push_to_hub:
trainer.push_to_hub(**UpperCAmelCase_ )
else:
trainer.create_model_card(**UpperCAmelCase_ )
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
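A standalone sketch of the flatten/unflatten trick used by `DataCollatorForMultipleChoice` above, assuming 4 answer choices: each example is expanded into `num_choices` sequences so the tokenizer can pad them as one flat batch, then the padded tensors are reshaped back.

import torch

batch_size, num_choices, seq_len = 2, 4, 8
flat = torch.zeros(batch_size * num_choices, seq_len)  # after list(chain(*flattened_features))
unflat = flat.view(batch_size, num_choices, -1)        # after tokenizer.pad(...)
print(unflat.shape)  # torch.Size([2, 4, 8])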
| 83 | 0 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCamelCase_ = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """simple docstring"""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(UpperCAmelCase_ ,'r' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = []
if args.gold_data_mode == "qa":
SCREAMING_SNAKE_CASE : List[str] = pd.read_csv(UpperCAmelCase_ ,sep='\t' ,header=UpperCAmelCase_ )
for answer_list in data[1]:
SCREAMING_SNAKE_CASE : Any = ast.literal_eval(UpperCAmelCase_ )
answers.append(UpperCAmelCase_ )
else:
SCREAMING_SNAKE_CASE : Optional[int] = [line.strip() for line in open(UpperCAmelCase_ ,'r' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = [[reference] for reference in references]
SCREAMING_SNAKE_CASE : List[str] = 0
for prediction, ground_truths in zip(UpperCAmelCase_ ,UpperCAmelCase_ ):
total += 1
em += metric_max_over_ground_truths(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
fa += metric_max_over_ground_truths(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Dict = 1_0_0.0 * em / total
SCREAMING_SNAKE_CASE : int = 1_0_0.0 * fa / total
logger.info(f"F1: {fa:.2f}" )
logger.info(f"EM: {em:.2f}" )
def get_precision_at_k(args, preds_path, gold_data_path):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = args.k
SCREAMING_SNAKE_CASE : Dict = [line.strip() for line in open(UpperCAmelCase_ ,'r' ).readlines()]
SCREAMING_SNAKE_CASE : str = [line.strip() for line in open(UpperCAmelCase_ ,'r' ).readlines()]
SCREAMING_SNAKE_CASE : Union[str, Any] = 0
for hypo, reference in zip(UpperCAmelCase_ ,UpperCAmelCase_ ):
SCREAMING_SNAKE_CASE : List[Any] = set(hypo.split('\t' )[:k] )
SCREAMING_SNAKE_CASE : List[Any] = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
SCREAMING_SNAKE_CASE : Any = 1_0_0.0 * em / total
logger.info(f"Precision@{k}: {em: .2f}" )
def evaluate_batch_retrieval(args, rag_model, questions):
"""simple docstring"""
def strip_title(__UpperCamelCase: Tuple ):
if title.startswith('"' ):
SCREAMING_SNAKE_CASE : List[str] = title[1:]
if title.endswith('"' ):
SCREAMING_SNAKE_CASE : Any = title[:-1]
return title
SCREAMING_SNAKE_CASE : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ ,return_tensors='pt' ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,)['input_ids'].to(args.device )
SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = question_enc_outputs[0]
SCREAMING_SNAKE_CASE : str = rag_model.retriever(
UpperCAmelCase_ ,question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() ,prefix=rag_model.rag.generator.config.prefix ,n_docs=rag_model.config.n_docs ,return_tensors='pt' ,)
SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
SCREAMING_SNAKE_CASE : Optional[Any] = []
for docs in all_docs:
SCREAMING_SNAKE_CASE : Any = [strip_title(UpperCAmelCase_ ) for title in docs['title']]
provenance_strings.append('\t'.join(UpperCAmelCase_ ) )
return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
"""simple docstring"""
with torch.no_grad():
SCREAMING_SNAKE_CASE : int = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
UpperCAmelCase_ ,return_tensors='pt' ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device )
SCREAMING_SNAKE_CASE : Optional[int] = inputs_dict.attention_mask.to(args.device )
SCREAMING_SNAKE_CASE : str = rag_model.generate( # rag_model overwrites generate
UpperCAmelCase_ ,attention_mask=UpperCAmelCase_ ,num_beams=args.num_beams ,min_length=args.min_length ,max_length=args.max_length ,early_stopping=UpperCAmelCase_ ,num_return_sequences=1 ,bad_words_ids=[[0, 0]] ,)
SCREAMING_SNAKE_CASE : List[str] = rag_model.retriever.generator_tokenizer.batch_decode(UpperCAmelCase_ ,skip_special_tokens=UpperCAmelCase_ )
if args.print_predictions:
for q, a in zip(UpperCAmelCase_ ,UpperCAmelCase_ ):
logger.info('Q: {} - A: {}'.format(UpperCAmelCase_ ,UpperCAmelCase_ ) )
return answers
def get_args():
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser()
parser.add_argument(
'--model_type' ,choices=['rag_sequence', 'rag_token', 'bart'] ,type=UpperCAmelCase_ ,help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) ,)
parser.add_argument(
'--index_name' ,default=UpperCAmelCase_ ,choices=['exact', 'compressed', 'legacy'] ,type=UpperCAmelCase_ ,help='RAG model retriever type' ,)
parser.add_argument(
'--index_path' ,default=UpperCAmelCase_ ,type=UpperCAmelCase_ ,help='Path to the retrieval index' ,)
parser.add_argument('--n_docs' ,default=5 ,type=UpperCAmelCase_ ,help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' ,default=UpperCAmelCase_ ,type=UpperCAmelCase_ ,required=UpperCAmelCase_ ,help='Path to pretrained checkpoints or model identifier from huggingface.co/models' ,)
parser.add_argument(
'--eval_mode' ,choices=['e2e', 'retrieval'] ,default='e2e' ,type=UpperCAmelCase_ ,help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) ,)
parser.add_argument('--k' ,default=1 ,type=UpperCAmelCase_ ,help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' ,default=UpperCAmelCase_ ,type=UpperCAmelCase_ ,required=UpperCAmelCase_ ,help='Path to a file containing evaluation samples' ,)
parser.add_argument(
'--gold_data_path' ,default=UpperCAmelCase_ ,type=UpperCAmelCase_ ,required=UpperCAmelCase_ ,help='Path to a tab-separated file with gold samples' ,)
parser.add_argument(
'--gold_data_mode' ,default='qa' ,type=UpperCAmelCase_ ,choices=['qa', 'ans'] ,help=(
'Format of the gold data file'
'qa - a single line in the following format: question [tab] answer_list'
'ans - a single line of the gold file contains the expected answer string'
) ,)
parser.add_argument(
'--predictions_path' ,type=UpperCAmelCase_ ,default='predictions.txt' ,help='Name of the predictions file, to be stored in the checkpoints directory' ,)
parser.add_argument(
'--eval_all_checkpoints' ,action='store_true' ,help='Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number' ,)
parser.add_argument(
'--eval_batch_size' ,default=8 ,type=UpperCAmelCase_ ,help='Batch size per GPU/CPU for evaluation.' ,)
parser.add_argument(
'--recalculate' ,help='Recalculate predictions even if the prediction file exists' ,action='store_true' ,)
parser.add_argument(
'--num_beams' ,default=4 ,type=UpperCAmelCase_ ,help='Number of beams to be used when generating answers' ,)
parser.add_argument('--min_length' ,default=1 ,type=UpperCAmelCase_ ,help='Min length of the generated answers' )
parser.add_argument('--max_length' ,default=50 ,type=UpperCAmelCase_ ,help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' ,action='store_true' ,help='If True, prints predictions while evaluating.' ,)
parser.add_argument(
'--print_docs' ,action='store_true' ,help='If True, prints docs retried while generating.' ,)
SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args()
SCREAMING_SNAKE_CASE : Tuple = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def main(args):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = {}
if args.model_type is None:
SCREAMING_SNAKE_CASE : Tuple = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
SCREAMING_SNAKE_CASE : str = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
SCREAMING_SNAKE_CASE : Dict = args.n_docs
if args.index_name is not None:
SCREAMING_SNAKE_CASE : int = args.index_name
if args.index_path is not None:
SCREAMING_SNAKE_CASE : int = args.index_path
else:
SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration
SCREAMING_SNAKE_CASE : int = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' ,UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
SCREAMING_SNAKE_CASE : Dict = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(UpperCAmelCase_ ,args.predictions_path ,args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(UpperCAmelCase_ ) )
logger.info(' Batch size = %d' ,args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
SCREAMING_SNAKE_CASE : Dict = RagRetriever.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Optional[Any] = model_class.from_pretrained(UpperCAmelCase_ ,retriever=UpperCAmelCase_ ,**UpperCAmelCase_ )
model.retriever.init_retrieval()
else:
SCREAMING_SNAKE_CASE : int = model_class.from_pretrained(UpperCAmelCase_ ,**UpperCAmelCase_ )
model.to(args.device )
with open(args.evaluation_set ,'r' ) as eval_file, open(args.predictions_path ,'w' ) as preds_file:
SCREAMING_SNAKE_CASE : str = []
for line in tqdm(UpperCAmelCase_ ):
questions.append(line.strip() )
if len(UpperCAmelCase_ ) == args.eval_batch_size:
SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
preds_file.write('\n'.join(UpperCAmelCase_ ) + '\n' )
preds_file.flush()
SCREAMING_SNAKE_CASE : Optional[Any] = []
if len(UpperCAmelCase_ ) > 0:
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate_batch_fn(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
preds_file.write('\n'.join(UpperCAmelCase_ ) )
preds_file.flush()
score_fn(UpperCAmelCase_ ,args.predictions_path ,args.gold_data_path )
if __name__ == "__main__":
UpperCamelCase_ = get_args()
main(args)
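A cleaned-up sketch of the precision@k metric computed above, assuming each line holds tab-separated provenance titles: the score is the mean fraction of the top-k predicted titles that appear among the gold titles.

def precision_at_k(hypos, references, k):
    total, score = 0, 0.0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        score += len(hypo_provenance & ref_provenance) / k
    return 100.0 * score / total

print(precision_at_k(["a\tb\tc"], ["a\tz"], k=2))  # 50.0: one of the top-2 titles matches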
| 251 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class lowercase__ :
lowercase__ = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
lowercase__ = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
lowercase__ = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
lowercase__ = field(
default=lowercase , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def A__ ( ):
_UpperCamelCase : Optional[Any] = HfArgumentParser((ModelArguments,) )
((_UpperCamelCase) , ) : Optional[int] = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
_UpperCamelCase : Any = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
_UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
_UpperCamelCase : str = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
_UpperCamelCase : str = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
_UpperCamelCase : List[Any] = True
_UpperCamelCase : Union[str, Any] = True
_UpperCamelCase : str = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=UpperCAmelCase_ , decoder_config=UpperCAmelCase_ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
_UpperCamelCase : str = decoder_config.decoder_start_token_id
_UpperCamelCase : Optional[int] = decoder_config.pad_token_id
if decoder_start_token_id is None:
_UpperCamelCase : int = decoder_config.bos_token_id
if pad_token_id is None:
_UpperCamelCase : Dict = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
_UpperCamelCase : List[Any] = decoder_config.eos_token_id
_UpperCamelCase : Dict = decoder_start_token_id
_UpperCamelCase : int = pad_token_id
_UpperCamelCase : List[str] = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
_UpperCamelCase : List[Any] = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
_UpperCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
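A hedged sketch of the core call the script builds up to, assuming `transformers` with Flax extras installed; the checkpoint names are illustrative, not taken from the script.

from transformers import FlaxVisionEncoderDecoderModel

model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    encoder_pretrained_model_name_or_path="google/vit-base-patch16-224-in21k",
    decoder_pretrained_model_name_or_path="gpt2",
)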
| 83 | 0 |
"""simple docstring"""
UNIT_SYMBOL = {
'meter': 'm',
'kilometer': 'km',
'megametre': 'Mm',
'gigametre': 'Gm',
'terametre': 'Tm',
'petametre': 'Pm',
'exametre': 'Em',
'zettametre': 'Zm',
'yottametre': 'Ym',
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
'm': 0,
'km': 3,
'Mm': 6,
'Gm': 9,
'Tm': 12,
'Pm': 15,
'Em': 18,
'Zm': 21,
'Ym': 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    '''simple docstring'''
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'from_type' value: {from_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"""Invalid 'to_type' value: {to_type!r}.\n"""
            f"""Conversion abbreviations are: {", ".join(METRIC_CONVERSION)}"""
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(1_0, exponent)
if __name__ == "__main__":
from doctest import testmod
testmod()
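Example conversions for the function above; the results follow directly from the exponent table (km -> 3, Mm -> 6, m -> 0).

print(length_conversion(4, "kilometer", "megametre"))  # 0.004
print(length_conversion(1, "meter", "km"))             # 0.001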
| 108 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case_ : Dict = logging.get_logger(__name__)
class lowercase__ ( lowercase ):
def __init__( self : List[Any] ,lowerCamelCase__ : int ,lowerCamelCase__ : int ,lowerCamelCase__ : float ,**lowerCamelCase__ : int ):
'''simple docstring'''
_UpperCamelCase : List[Any] = feature_size
_UpperCamelCase : Any = sampling_rate
_UpperCamelCase : Optional[Any] = padding_value
_UpperCamelCase : Union[str, Any] = kwargs.pop('padding_side' ,'right' )
_UpperCamelCase : Dict = kwargs.pop('return_attention_mask' ,lowerCamelCase__ )
super().__init__(**lowerCamelCase__ )
def UpperCamelCase_ ( self : Optional[Any] ,lowerCamelCase__ : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] ,lowerCamelCase__ : Union[bool, str, PaddingStrategy] = True ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : bool = False ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,lowerCamelCase__ : Optional[Union[str, TensorType]] = None ,):
'''simple docstring'''
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(lowerCamelCase__ ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
_UpperCamelCase : int = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
_UpperCamelCase : List[Any] = processed_features[self.model_input_names[0]]
_UpperCamelCase : Dict = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowerCamelCase__ ) == 0:
if return_attention_mask:
_UpperCamelCase : Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_UpperCamelCase : List[str] = required_input[0]
if isinstance(lowerCamelCase__ ,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
_UpperCamelCase : List[str] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowerCamelCase__ ):
_UpperCamelCase : Dict = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowerCamelCase__ ):
_UpperCamelCase : Any = 'tf'
elif is_torch_tensor(lowerCamelCase__ ):
_UpperCamelCase : Optional[int] = 'pt'
elif isinstance(lowerCamelCase__ ,(int, float, list, tuple, np.ndarray) ):
_UpperCamelCase : int = 'np'
else:
raise ValueError(
F'type of {first_element} unknown: {type(lowerCamelCase__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
_UpperCamelCase : Any = to_numpy(lowerCamelCase__ )
else:
_UpperCamelCase : Any = [to_numpy(lowerCamelCase__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
_UpperCamelCase : Optional[int] = self._get_padding_strategies(padding=lowerCamelCase__ ,max_length=lowerCamelCase__ )
_UpperCamelCase : str = processed_features[self.model_input_names[0]]
_UpperCamelCase : List[str] = len(lowerCamelCase__ )
if not all(len(lowerCamelCase__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
_UpperCamelCase : List[str] = []
for i in range(lowerCamelCase__ ):
_UpperCamelCase : List[str] = {k: v[i] for k, v in processed_features.items()}
# truncation
_UpperCamelCase : List[str] = self._truncate(
lowerCamelCase__ ,max_length=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,truncation=lowerCamelCase__ ,)
truncated_inputs.append(lowerCamelCase__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_UpperCamelCase : Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_UpperCamelCase : Any = PaddingStrategy.MAX_LENGTH
_UpperCamelCase : Optional[Any] = {}
for i in range(lowerCamelCase__ ):
# padding
_UpperCamelCase : Any = self._pad(
truncated_inputs[i] ,max_length=lowerCamelCase__ ,padding_strategy=lowerCamelCase__ ,pad_to_multiple_of=lowerCamelCase__ ,return_attention_mask=lowerCamelCase__ ,)
for key, value in outputs.items():
if key not in batch_outputs:
_UpperCamelCase : Dict = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)
return BatchFeature(lowerCamelCase__ ,tensor_type=lowerCamelCase__ )
def UpperCamelCase_ ( self : Any ,lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,):
'''simple docstring'''
_UpperCamelCase : Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_UpperCamelCase : Optional[Any] = len(lowerCamelCase__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_UpperCamelCase : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_UpperCamelCase : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowerCamelCase__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_UpperCamelCase : Tuple = np.ones(len(lowerCamelCase__ ) ,dtype=np.intaa )
if needs_to_be_padded:
_UpperCamelCase : Dict = max_length - len(lowerCamelCase__ )
if self.padding_side == "right":
if return_attention_mask:
_UpperCamelCase : Optional[int] = np.pad(
processed_features['attention_mask'] ,(0, difference) )
_UpperCamelCase : Union[str, Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_UpperCamelCase : List[Any] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_UpperCamelCase : List[Any] = np.pad(
processed_features['attention_mask'] ,(difference, 0) )
_UpperCamelCase : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_UpperCamelCase : List[str] = np.pad(
lowerCamelCase__ ,lowerCamelCase__ ,'constant' ,constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : Union[Dict[str, np.ndarray], BatchFeature] ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[int] = None ,lowerCamelCase__ : Optional[bool] = None ,):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
_UpperCamelCase : int = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_UpperCamelCase : Optional[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_UpperCamelCase : Optional[int] = len(lowerCamelCase__ ) > max_length
if needs_to_be_truncated:
_UpperCamelCase : Dict = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_UpperCamelCase : Optional[Any] = processed_features['attention_mask'][:max_length]
return processed_features
    def _get_padding_strategies( self , padding=False , max_length=None ):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
                ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
        return padding_strategy
| 83 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__A : Dict = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-model-flax""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-model-flax-org""" )
except HTTPError:
pass
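    # Round-trip test: push a tiny FlaxBertModel to the Hub, reload it, and verify
    # every flattened parameter tensor is numerically unchanged.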
    def test_push_to_hub( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub("""test-model-flax""" , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=f'{key} not identical' )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-model-flax""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir , repo_id="""test-model-flax""" , push_to_hub=True , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=f'{key} not identical' )
    def test_push_to_hub_in_organization( self ):
        config = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        model = FlaxBertModel(config )
        model.push_to_hub("""valid_org/test-model-flax-org""" , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=f'{key} not identical' )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-model-flax-org""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir , repo_id="""valid_org/test-model-flax-org""" , push_to_hub=True , use_auth_token=self._token )
        new_model = FlaxBertModel.from_pretrained("""valid_org/test-model-flax-org""" )
        base_params = flatten_dict(unfreeze(model.params ) )
        new_params = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff , 1E-3 , msg=f'{key} not identical' )
def check_models_equal( model_1 , model_2 ):
    """Return True when the two Flax models carry numerically identical parameters."""
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params )
    flat_params_2 = flatten_dict(model_2.params )
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key] ) ) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class __snake_case ( unittest.TestCase):
"""simple docstring"""
    def test_model_from_pretrained_subfolder( self ):
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
        model = FlaxBertModel(config )
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_subfolder_sharded( self ):
        config = BertConfig.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
        model = FlaxBertModel(config )
        subfolder = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir , subfolder ) , max_shard_size="""10KB""" )
            with self.assertRaises(OSError ):
                _ = FlaxBertModel.from_pretrained(tmp_dir )
            model_loaded = FlaxBertModel.from_pretrained(tmp_dir , subfolder=subfolder )
        self.assertTrue(check_models_equal(model , model_loaded ) )
    def test_model_from_pretrained_hub_subfolder( self ):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-subfolder'
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
    def test_model_from_pretrained_hub_subfolder_sharded( self ):
        subfolder = 'bert'
        model_id = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
        with self.assertRaises(OSError ):
            _ = FlaxBertModel.from_pretrained(model_id )
        model = FlaxBertModel.from_pretrained(model_id , subfolder=subfolder )
        self.assertIsNotNone(model )
| 120 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
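# Dense univariate polynomial over the reals, stored as a list of coefficients
# indexed by power (coefficients[i] multiplies x**i).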
class Polynomial:
    def __init__( self , degree : int , coefficients : MutableSequence[float] ) -> None:
        """The coefficients are ordered by power, from x**0 up to x**degree."""
        if len(coefficients ) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.' )
        self.coefficients : list[float] = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a : Polynomial ) -> Polynomial:
        """Add two polynomials by summing coefficients up to the smaller degree."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a : Polynomial ) -> Polynomial:
        """Subtraction is addition with the polynomial negated via a constant -1 factor."""
        return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : Dict ):
'''simple docstring'''
return Polynomial(self.degree ,[-c for c in self.coefficients] )
    def __mul__( self , polynomial_a : Polynomial ) -> Polynomial:
        """Multiply two polynomials via the Cauchy product of their coefficients."""
        coefficients : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate( self , substitution : int | float ) -> int | float:
        """Evaluate the polynomial at the given value by direct summation."""
        result : int | float = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self ) -> str:
        polynomial = ''
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
def __repr__( self : List[str] ):
'''simple docstring'''
return self.__str__()
    def derivative( self ) -> Polynomial:
        """Return the first derivative via the power rule."""
        coefficients : list[float] = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral( self , constant : int | float = 0 ) -> Polynomial:
        """Return the antiderivative with the given constant of integration."""
        coefficients : list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a : object ) -> bool:
        if not isinstance(polynomial_a , Polynomial ):
            return False
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
    def __ne__( self , polynomial_a : object ) -> bool:
        return not self.__eq__(polynomial_a )
| 83 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
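# End-to-end SageMaker test: launches a HuggingFace estimator with smdistributed
# model parallelism and checks runtime/accuracy/loss KPIs reported by the job.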
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp( self ):
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=True , )
        assert hasattr(self , """env""" )
    def create_estimator( self , instance_count ):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            """enabled""": True,
            """processes_per_host""": 8,
        }
        smp_options = {
            """enabled""": True,
            """parameters""": {
                """microbatches""": 4,
                """placement_strategy""": """spread""",
                """pipeline""": """interleaved""",
                """optimize""": """speed""",
                """partitions""": 4,
                """ddp""": True,
            },
        }
        distribution = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
        name_extension = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                """model_name_or_path""": self.model_name_or_path,
                """max_steps""": 500,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="""py36""" , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
    def test_script( self , instance_count ):
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , outfile )
| 84 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel
from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
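# Tester mixin for the DeepFloyd IF pipelines: builds tiny T5/UNet dummy components
# and verifies save/load round-trips reproduce the same output. Method names below
# follow diffusers' IF pipeline tester mixin and are assumed from that suite.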
class _SCREAMING_SNAKE_CASE :
    def get_dummy_components( self ):
        torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                """ResnetDownsampleBlock2D""",
                """SimpleCrossAttnDownBlock2D""",
            ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        # `thresholding=True` is assumed here; the IF pipelines use dynamic thresholding by default.
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def get_superresolution_dummy_components( self ):
        torch.manual_seed(0 )
        text_encoder = T5EncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        tokenizer = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                """ResnetDownsampleBlock2D""",
                """SimpleCrossAttnDownBlock2D""",
            ] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.414 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        # `thresholding=True` is assumed here as well (dynamic thresholding is the IF default).
        scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , thresholding=True , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
        torch.manual_seed(0 )
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        watermarker = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs["""prompt"""]
        generator = inputs["""generator"""]
        num_inference_steps = inputs["""num_inference_steps"""]
        output_type = inputs["""output_type"""]
        if "image" in inputs:
            image = inputs["""image"""]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["""mask_image"""]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["""original_image"""]
        else:
            original_image = None
        prompt_embeds , negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            inputs["""image"""] = image
        if mask_image is not None:
            inputs["""mask_image"""] = mask_image
        if original_image is not None:
            inputs["""original_image"""] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs["""generator"""]
        num_inference_steps = inputs["""num_inference_steps"""]
        output_type = inputs["""output_type"""]
        # inputs with prompt converted to embeddings
        inputs = {
            """prompt_embeds""": prompt_embeds,
            """negative_prompt_embeds""": negative_prompt_embeds,
            """generator""": generator,
            """num_inference_steps""": num_inference_steps,
            """output_type""": output_type,
        }
        if image is not None:
            inputs["""image"""] = image
        if mask_image is not None:
            inputs["""mask_image"""] = mask_image
        if original_image is not None:
            inputs["""original_image"""] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
    def _test_save_load_local( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
| 84 | 1 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
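# Output modes for text2text pipelines: raw generated token ids or decoded text.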
class ReturnType(enum.Enum ):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS )
class Text2TextGenerationPipeline(Pipeline ):
    return_name = "generated"
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == """tf"""
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters( self , return_tensors=None , return_type=None , clean_up_tokenization_spaces=None , truncation=None , stop_sequence=None , **generate_kwargs ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["""truncation"""] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["""return_type"""] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["""clean_up_tokenization_spaces"""] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence , add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    """Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
                    """ the stop sequence will be used as the stop sequence string in the interim.""" )
            generate_kwargs["""eos_token_id"""] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs( self , input_length : int , min_length : int , max_length : int ):
        return True
    def _parse_and_tokenize( self , *args , truncation ):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else """"""
        if isinstance(args[0] , list ):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0] , str ):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. It should be either of type `str` or type `list`""" )
        inputs = self.tokenizer(*args , padding=padding , truncation=truncation , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__( self , *args , **kwargs ):
        result = super().__call__(*args , **kwargs )
        if (
            isinstance(args[0] , list )
            and all(isinstance(el , str ) for el in args[0] )
            and all(len(res ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result
    def preprocess( self , inputs , truncation=TruncationStrategy.DO_NOT_TRUNCATE , **kwargs ):
        inputs = self._parse_and_tokenize(inputs , truncation=truncation , **kwargs )
        return inputs
    def _forward( self , model_inputs , **generate_kwargs ):
        if self.framework == "pt":
            in_b , input_length = model_inputs["""input_ids"""].shape
        elif self.framework == "tf":
            in_b , input_length = tf.shape(model_inputs["""input_ids"""] ).numpy()
        generate_kwargs["""min_length"""] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
        generate_kwargs["""max_length"""] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
        self.check_inputs(input_length , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
        output_ids = self.model.generate(**model_inputs , **generate_kwargs )
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}
    def postprocess( self , model_outputs , return_type=ReturnType.TEXT , clean_up_tokenization_spaces=False ):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"""{self.return_name}_token_ids""": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"""{self.return_name}_text""": self.tokenizer.decode(
                        output_ids , skip_special_tokens=True , clean_up_tokenization_spaces=clean_up_tokenization_spaces , )
                }
            records.append(record )
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS )
class SummarizationPipeline(Text2TextGenerationPipeline ):
    return_name = "summary"
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
    def check_inputs( self , input_length : int , min_length : int , max_length : int ) -> bool:
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
        if input_length < max_length:
            logger.warning(
                f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
                """a summarization task, where outputs shorter than the input are typically wanted, you might """
                f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS )
class TranslationPipeline(Text2TextGenerationPipeline ):
    return_name = "translation"
    def check_inputs( self , input_length : int , min_length : int , max_length : int ):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
                """increasing your max_length manually, e.g. translator('...', max_length=400)""" )
        return True
    def _parse_and_tokenize( self , *args , truncation=TruncationStrategy.DO_NOT_TRUNCATE , src_lang=None , tgt_lang=None ):
        if getattr(self.tokenizer , """_build_translation_inputs""" , None ):
            return self.tokenizer._build_translation_inputs(
                *args , return_tensors=self.framework , truncation=truncation , src_lang=src_lang , tgt_lang=tgt_lang )
        else:
            return super()._parse_and_tokenize(*args , truncation=truncation )
    def _sanitize_parameters( self , src_lang=None , tgt_lang=None , **kwargs ):
        preprocess_params , forward_params , postprocess_params = super()._sanitize_parameters(**kwargs )
        if src_lang is not None:
            preprocess_params["""src_lang"""] = src_lang
        if tgt_lang is not None:
            preprocess_params["""tgt_lang"""] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("""task""" , self.task )
            items = task.split("""_""" )
            if task and len(items ) == 4:
                # translation, XX, to YY
                preprocess_params["""src_lang"""] = items[1]
                preprocess_params["""tgt_lang"""] = items[3]
        return preprocess_params, forward_params, postprocess_params
    def __call__( self , *args , **kwargs ):
        return super().__call__(*args , **kwargs )
| 84 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
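# Slow integration tests: run Stable Diffusion 2 in bfloat16, replicated across all
# local devices via pmap (`jit=True`), and compare an image slice against reference
# values. Method names below are assumed from diffusers' Flax SD2 integration tests.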
@slow
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax( self ):
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloat16 , )
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
    def test_stable_diffusion_dpm_flax( self ):
        model_id = """stabilityai/stable-diffusion-2"""
        scheduler , scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id , subfolder="""scheduler""" )
        sd_pipe , params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id , scheduler=scheduler , revision="""bf16""" , dtype=jnp.bfloat16 , )
        params["""scheduler"""] = scheduler_params
        prompt = """A painting of a squirrel eating a burger"""
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt )
        params = replicate(params )
        prompt_ids = shard(prompt_ids )
        prng_seed = jax.random.PRNGKey(0 )
        prng_seed = jax.random.split(prng_seed , jax.device_count() )
        images = sd_pipe(prompt_ids , params , prng_seed , num_inference_steps=25 , jit=True )[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten() ) )
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
        print(f"""output_slice: {output_slice}""" )
        assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 84 | 1 |
"""simple docstring"""
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299_792_458
# Symbols
ct , x , y , z = symbols('ct x y z')
def beta( velocity : float ) -> float:
    """Return v/c, validating that 1 <= velocity <= c."""
    if velocity > c:
        raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("""Speed must be greater than or equal to 1!""" )
    return velocity / c
def gamma( velocity : float ) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix( velocity : float ) -> np.ndarray:
    """Build the 4x4 Lorentz boost matrix for a boost along the x-axis."""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform( velocity : float , event : np.ndarray | None = None ) -> np.ndarray:
    """Apply the boost to an event four-vector; defaults to the symbolic (ct, x, y, z)."""
    if event is None:
        event = np.array([ct, x, y, z] ) # Symbolic four vector
    else:
        event[0] *= c # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print('Example of four vector: ')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 84 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
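# Project Euler 123: for odd n, the remainder of (p_n - 1)**n + (p_n + 1)**n
# modulo p_n**2 equals 2 * n * p_n, so it suffices to scan odd n until that
# product first exceeds the limit of 10**10.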
def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes that lazily yields primes forever."""
    factor_map : dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime , None )
        if factor:
            # `prime` is composite: slide its known factor to the next unmarked multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: mark its square and yield it.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
def solution( limit : float = 1E10 ) -> int:
    """Return the least odd n for which 2 * p_n * n exceeds `limit`."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes )
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes )
        n += 2
if __name__ == "__main__":
print(solution())
| 84 | 1 |
"""simple docstring"""
def is_isogram( string : str ) -> bool:
    """Return True when no letter occurs more than once in `string` (case-insensitive)."""
    if not all(x.isalpha() for x in string ):
        raise ValueError("""String must only contain alphabetic characters.""" )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
    print(F"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 84 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
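# Fast CPU smoke tests for the ONNX x4 upscaler: swap in different schedulers and
# compare a 3x3 output slice against precomputed reference values. Class and test
# names below are assumed from diffusers' ONNX upscaler test suite.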
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin , unittest.TestCase ):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs( self , seed=0 ):
        image = floats_tensor((1, 3, 128, 128) , rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
    def test_pipeline_default_ddpm( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
    def gpu_provider( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((128, 128) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def test_inference_k_lms( self ):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        init_image = init_image.resize((128, 128) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            """ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=lms_scheduler , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = """A fantasy landscape, trending on artstation"""
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=init_image , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type="""np""" , )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 84 | 1 |
"""simple docstring"""
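# Maximal-matching 2-approximation for minimum vertex cover: repeatedly pick an
# arbitrary remaining edge, add both endpoints to the cover, and delete every edge
# touching them. Any optimal cover needs at least one endpoint per picked edge,
# so the result is at most twice the optimum.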
def matching_min_vertex_cover( graph : dict ) -> set:
    chosen_vertices = set()
    # edges = list of graph's edges
    edges = get_edges(graph )
    # While there are still elements in the edges set, take an arbitrary edge
    # (from_node, to_node), add its two extremities to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges( graph : dict ) -> set:
    """Return the set of (from_node, to_node) pairs in the adjacency dict."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 84 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp( self ):
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=True , )
        assert hasattr(self , """env""" )
    def create_estimator( self , instance_count ):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            """enabled""": True,
            """processes_per_host""": 8,
        }
        smp_options = {
            """enabled""": True,
            """parameters""": {
                """microbatches""": 4,
                """placement_strategy""": """spread""",
                """pipeline""": """interleaved""",
                """optimize""": """speed""",
                """partitions""": 4,
                """ddp""": True,
            },
        }
        distribution = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
        name_extension = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
                **self.env.hyperparameters,
                """model_name_or_path""": self.model_name_or_path,
                """max_steps""": 500,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version="""py36""" , )
    def save_results_as_csv( self , job_name ):
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
    def test_script( self , instance_count ):
        # create estimator
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
        assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
            json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , outfile )
| 84 | 1 |
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
__UpperCAmelCase = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
__UpperCAmelCase = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
__UpperCAmelCase = re.compile(R'TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
__UpperCAmelCase = re.compile(R'Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Will match any TF or Flax model too so it needs to be in an else branch after the two previous regexes.
__UpperCAmelCase = re.compile(R'(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)')
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
__UpperCAmelCase = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
    ('image-to-text', 'MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
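# The helpers below introspect the transformers module to build per-framework
# support tables and sync them to the huggingface/transformers-metadata dataset.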
def camel_case_split( identifier : str ) -> list:
    """Split a CamelCase name into its component words."""
    matches = re.finditer(""".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)""" , identifier )
    return [m.group(0 ) for m in matches]
def get_frameworks_table() -> "pd.DataFrame":
    """Build a dataframe flagging PT/TF/Flax support for every model type."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("""Config""" , """""" ): model_type for model_type, config in config_maping_names.items()
    }
    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool )
    tf_models = collections.defaultdict(bool )
    flax_models = collections.defaultdict(bool )
    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module ):
        lookup_dict = None
        if _re_tf_models.match(attr_name ) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name ).groups()[0]
        elif _re_flax_models.match(attr_name ) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name ).groups()[0]
        elif _re_pt_models.match(attr_name ) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name ).groups()[0]
        if lookup_dict is not None:
            while len(attr_name ) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = """""".join(camel_case_split(attr_name )[:-1] )
    all_models = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
    all_models = list(all_models )
    all_models.sort()
    data = {"""model_type""": all_models}
    data["""pytorch"""] = [pt_models[t] for t in all_models]
    data["""tensorflow"""] = [tf_models[t] for t in all_models]
    data["""flax"""] = [flax_models[t] for t in all_models]
    # Now let's use the auto-mapping names to make sure every model type has a preprocessor class.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = """AutoProcessor"""
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = """AutoTokenizer"""
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = """AutoFeatureExtractor"""
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = """AutoTokenizer"""
    data["""processor"""] = [processors[t] for t in all_models]
    return pd.DataFrame(data )
def update_pipeline_and_auto_class_table( table : dict ) -> dict:
    """Update the (pipeline tag, auto class) table with any new model classes."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"""TF_{model_mapping}""", f"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, f"""TF_{auto_class}""", f"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules , auto_classes , model_mappings ):
            # The type of pipeline may not exist in this framework
            if not hasattr(module , mapping ):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module , mapping ).values():
                if isinstance(name , str ):
                    model_names.append(name )
                else:
                    model_names.extend(list(name ) )
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
    return table
def update_metadata( token : str , commit_sha : str ) -> None:
    """Regenerate the metadata dataset files and push them to the Hub."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table )
    resolved_tags_file = hf_hub_download(
        """huggingface/transformers-metadata""" , """pipeline_tags.json""" , repo_type="""dataset""" , token=token )
    tags_dataset = Dataset.from_json(resolved_tags_file )
    table = {
        tags_dataset[i]["""model_class"""]: (tags_dataset[i]["""pipeline_tag"""], tags_dataset[i]["""auto_class"""])
        for i in range(len(tags_dataset ) )
    }
    table = update_pipeline_and_auto_class_table(table )
    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys() )
    tags_table = pd.DataFrame(
        {
            """model_class""": model_classes,
            """pipeline_tag""": [table[m][0] for m in model_classes],
            """auto_class""": [table[m][1] for m in model_classes],
        } )
    tags_dataset = Dataset.from_pandas(tags_table )
    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir , """frameworks.json""" ) )
        tags_dataset.to_json(os.path.join(tmp_dir , """pipeline_tags.json""" ) )
        if commit_sha is not None:
            commit_message = (
                f"""Update with commit {commit_sha}\n\nSee: """
                f"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = """Update"""
        upload_folder(
            repo_id="""huggingface/transformers-metadata""" , folder_path=tmp_dir , repo_type="""dataset""" , token=token , commit_message=commit_message , )
def check_pipeline_tags():
    '''simple docstring'''
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["""pt"""]
            if isinstance(model , (list, tuple) ):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key )
    if len(missing ) > 0:
        msg = """, """.join(missing )
        raise ValueError(
            """The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside """
            f"""`utils/update_metadata.py`: {msg}. Please add them!""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
args = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
| 84 |
"""simple docstring"""
def solution( n : int = 1_0 ) -> str:
    '''simple docstring'''
    if not isinstance(n , int ) or n < 0:
        raise ValueError("""Invalid input""" )
    modulus = 1_0**n
    number = 2_8_4_3_3 * (pow(2 , 7_8_3_0_4_5_7 , modulus )) + 1
    return str(number % modulus )
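# Sanity check (Project Euler 97): three-argument pow keeps the computation
# modular, so the multi-million-digit power is never materialised.
# >>> solution(10)
# '8739992577'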
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 84 | 1 |
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
    def test_multi_gpu( self ):
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
    def test_multi_gpu_ops( self ):
        print(f"""Found {torch.cuda.device_count()} devices.""" )
        cmd = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.operation_file_path]
        print(f"""Command: {cmd}""" )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
    def test_pad_across_processes( self ):
        cmd = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
@require_multi_gpu
    def test_distributed_data_loop( self ):
        print(f"""Found {torch.cuda.device_count()} devices, using 2 devices only""" )
        cmd = ["""torchrun""", f"""--nproc_per_node={torch.cuda.device_count()}""", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
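    # Note: each test above shells out via `torchrun` instead of asserting
    # in-process; a fresh subprocess keeps every distributed run (and its
    # process group) isolated from pytest and from the other tests.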
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ''
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 84 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file( filename , start_prompt , end_prompt ):
    '''simple docstring'''
    with open(filename , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task( task_guide ):
    '''simple docstring'''
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ):
    '''simple docstring'''
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
                """ to fix this.""" )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 84 | 1 |
"""simple docstring"""
def solution( n : int = 1_0_0_0 ) -> int:
    '''simple docstring'''
    fa , fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa , fb = fb , f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
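# A hedged closed-form alternative (standard Binet-approximation argument;
# not part of the original solution): the first Fibonacci number with n
# digits has index ceil((n - 1 + log10(5)/2) / log10(phi)).
import math
def solution_closed_form( n : int = 1_0_0_0 ) -> int:
    phi = (1 + math.sqrt(5 )) / 2
    return math.ceil((n - 1 + math.log10(5 ) / 2) / math.log10(phi ) )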
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 84 |
"""simple docstring"""
def permute( nums : list[int] ) -> list[list[int]]:
    '''simple docstring'''
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permutea( nums : list[int] ) -> list[list[int]]:
    '''simple docstring'''
    def backtrack( start : int ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[start] , nums[i] = nums[i] , nums[start]
                backtrack(start + 1 )
                nums[start] , nums[i] = nums[i] , nums[start]  # backtrack
    output = []
    backtrack(0 )
    return output
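# Cross-check against the standard library (ordering may differ between the
# two hand-rolled versions and itertools, so compare as sorted lists):
# >>> import itertools
# >>> sorted(permutea([1, 2, 3])) == sorted(map(list, itertools.permutations([1, 2, 3])))
# True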
if __name__ == "__main__":
import doctest
    # use res to print the data from the backtracking version (permutea)
    res = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 84 | 1 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType( ExplicitEnum ):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
__UpperCAmelCase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor( ProcessorMixin ):
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , FutureWarning , )
            feature_extractor = kwargs.pop("""feature_extractor""" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("""gpt2""" )
        self.wp_tokenizer = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self , sequences ):
        char_preds , bpe_preds , wp_preds = sequences
        batch_size = char_preds.size(0 )
        char_strs , char_scores = self._decode_helper(char_preds , """char""" )
        bpe_strs , bpe_scores = self._decode_helper(bpe_preds , """bpe""" )
        wp_strs , wp_scores = self._decode_helper(wp_preds , """wp""" )
        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        out = {}
        out["""generated_text"""] = final_strs
        out["""scores"""] = final_scores
        out["""char_preds"""] = char_strs
        out["""bpe_preds"""] = bpe_strs
        out["""wp_preds"""] = wp_strs
        return out
    def _decode_helper( self , pred_logits , format ):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = """[s]"""
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = """#"""
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = """[SEP]"""
        else:
            raise ValueError(f"""Format {format} is not supported.""" )
        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        _ , preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )
        return dec_strs, conf_scores
    def char_decode( self , sequences ):
        decode_strs = [seq.replace(""" """ , """""" ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs
    def bpe_decode( self , sequences ):
        return self.bpe_tokenizer.batch_decode(sequences )
    def wp_decode( self , sequences ):
        decode_strs = [seq.replace(""" """ , """""" ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
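# Hedged usage sketch (checkpoint name is an assumption):
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   out = processor.batch_decode(model(pixel_values).logits)
#   out["generated_text"]  # best of the char/bpe/wp heads per sample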
| 84 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , """w""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        input_text = """lower newer"""
        output_text = """lower newer"""
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = BioGptTokenizer(self.vocab_file , self.merges_file )
        text = """lower"""
        bpe_tokens = ["""low""", """er</w>"""]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ["""<unk>"""]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def test_sequence_builders( self ):
        tokenizer = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
        text = tokenizer.encode("""sequence builders""" , add_special_tokens=False )
        text_a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
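        # The assertions above capture BioGPT's convention: the separator id
        # (2) is prepended, so a single sequence encodes as [2] + text and a
        # pair as [2] + text + [2] + text_a.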
| 84 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'sail/poolformer_s12': 'https://huggingface.co/sail/poolformer_s12/resolve/main/config.json',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig( PretrainedConfig ):
    model_type = "poolformer"
    def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 128, 320, 512] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1E-5 , initializer_range=0.0_2 , **kwargs , ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation( self ) -> float:
return 2E-3
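    # Hedged note: these two properties fulfil the OnnxConfig contract, with
    # `inputs` declaring the dynamic axes of `pixel_values` and the float
    # above (2e-3) used as the absolute tolerance when an ONNX export is
    # validated against the PyTorch model.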
| 84 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig( PretrainedConfig ):
    model_type = "bert-generation"
    def __init__( self , vocab_size=5_0358 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
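# Minimal usage sketch:
# config = BertGenerationConfig()               # BERT-large-sized defaults above
# config.hidden_size, config.num_hidden_layers  # -> (1024, 24)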
| 84 | 1 |
"""simple docstring"""
def _snake_case ( stra : str , strb : str ) -> int:
    '''simple docstring'''
    if len(stra ) != len(strb ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for chara, charb in zip(stra , strb ):
        if chara != charb:
            count += 1
    return count
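# Example (classic Hamming-distance pair):
# >>> _snake_case("""karolin""" , """kathrin""" )
# 3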
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 |
"""simple docstring"""
def bfs( graph , source , sink , parent ) -> bool:
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson( graph , source , sink ) -> int:
    '''simple docstring'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float("""Inf""" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the augmenting path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source , sink = 0, 5
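# For the classic 6-node capacity matrix above, the expected output is 23.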
print(ford_fulkerson(graph, source, sink))
| 84 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class _SCREAMING_SNAKE_CASE ( ABC ):
@staticmethod
@abstractmethod
def __lowerCAmelCase ( __A ) -> Optional[int]:
raise NotImplementedError()
@abstractmethod
def __lowerCAmelCase ( self ) -> List[Any]:
raise NotImplementedError()
| 84 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = 1_0
lowerCAmelCase_ :Optional[int] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
lowerCAmelCase_ :int = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
lowerCAmelCase_ :List[Any] = FILE_CONTENT
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope="""session""" )
def _snake_case ( tmp_path_factory ) -> Tuple:
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
    data = bytes(FILE_CONTENT , """utf-8""" )
    with bz2.open(path , """wb""" ) as f:
        f.write(data )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
import gzip
lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" )
with gzip.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( tmp_path_factory ) -> Optional[int]:
    '''simple docstring'''
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
        path = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
        data = bytes(FILE_CONTENT , """utf-8""" )
        with lz4.frame.open(path , """wb""" ) as f:
            f.write(data )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( tmp_path_factory , text_file ) -> Any:
    '''simple docstring'''
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
        path = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with py7zr.SevenZipFile(path , """w""" ) as archive:
            archive.write(text_file , arcname=os.path.basename(text_file ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
import tarfile
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> str:
'''simple docstring'''
import lzma
lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" )
with lzma.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import zipfile
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Tuple:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" )
with zstd.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
lowerCAmelCase_ :Any = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
__UpperCAmelCase = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
__UpperCAmelCase = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ )
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( tmp_path_factory ) -> str:
    '''simple docstring'''
    path = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
    with contextlib.closing(sqlite3.connect(path ) ) as con:
        cur = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( csv_path , tmp_path_factory ) -> Union[str, Any]:
    '''simple docstring'''
    import bz2
    path = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
    with open(csv_path , """rb""" ) as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path , """wb""" ) as f:
        f.write(data )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
lowerCAmelCase_ :Optional[Any] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowercase__ , """wb""" ) as f:
lowerCAmelCase_ :Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> int:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
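# Hedged usage sketch: pytest resolves these session-scoped fixtures by
# function name (the uniform names in this dump were flattened by renaming,
# so the parameter name below is illustrative):
#
# def test_csv_loading(csv_path):
#     assert csv_path.endswith(".csv")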
| 84 | 1 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger()
@dataclass
class Tracker:
    module :nn.Module
    traced :List[nn.Module] = field(default_factory=list )
    handles :list = field(default_factory=list )
    def _forward_hook( self , m , inputs , outputs ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
    def __call__( self , x ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [h.remove() for h in self.handles]
        return self
    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    src :nn.Module
    dest :nn.Module
    verbose :int = 0
    src_skip :List = field(default_factory=list )
    dest_skip :List = field(default_factory=list )
    def __call__( self , x ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip , dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f"""Numbers of operations are different. Source module has {len(src_traced )} operations while"""
                f""" destination module has {len(dest_traced )}.""" )
        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f"""Transfered from={src_m} to={dest_m}""" )
def convert_weight_and_push( name : str , config : ResNetConfig , save_directory : Path , push_to_hub : bool = True ):
    '''simple docstring'''
    print(f"""Converting {name}...""" )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 2_2_4, 2_2_4) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = f"""resnet{"-".join(name.split("resnet" ) )}"""
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=True , )
        print(f"""Pushed {checkpoint_name}""" )
def convert_weights_and_push( save_directory : Path , model_name : str = None , push_to_hub : bool = True ):
    '''simple docstring'''
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1_0_0_0
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=idalabel , label2id=labelaid )
    names_to_config = {
        """resnet18""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
        """resnet26""": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
        """resnet34""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type="""basic""" ),
        """resnet50""": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
        """resnet101""": ImageNetPreTrainedConfig(
            depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
        """resnet152""": ImageNetPreTrainedConfig(
            depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type="""bottleneck""" ),
    }
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
    return config, expected_shape
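# Typical invocation (script name assumed; flags defined below):
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./dump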
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 84 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig( PretrainedConfig ):
    model_type = "data2vec-text"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig( OnnxConfig ):
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 84 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations( unittest.TestCase ):
    def test_gelu_versions( self ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("""gelu""" )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def test_gelu_10( self ):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation("""gelu""" )
        geluaa = get_activation("""gelu_10""" )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        clipped_mask = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 1_0.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
    def test_get_activation( self ):
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
        with self.assertRaises(KeyError ):
            get_activation("""bogus""" )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def test_activations_are_distinct_objects( self ):
        act1 = get_activation("""gelu""" )
        act1.a = 1
        act2 = get_activation("""gelu""" )
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 84 |
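The test above compares several GELU variants and checks that the "gelu_10" variant clips at 10.0. A pure-Python reference of the exact erf-based GELU and a clipped variant, as a sketch rather than the transformers implementation:

import math

def gelu_exact(x):
    # exact GELU: x * Phi(x), with Phi the standard normal CDF
    return x * 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_clipped(x, clip=10.0):
    # "gelu_10" style: clamp the activation into [-clip, clip]
    return max(-clip, min(gelu_exact(x), clip))

assert gelu_exact(0.0) == 0.0
assert gelu_clipped(100.0) == 10.0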
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int:
'''simple docstring'''
if split_mlp_wi:
lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
lowerCAmelCase_ :Tuple = (wi_a, wi_a)
else:
lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] )
lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowercase__ )
lowerCAmelCase_ :List[Any] = collections.OrderedDict()
# Shared embeddings.
lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" )
lowerCAmelCase_ :Optional[Any] = layer_norm
lowerCAmelCase_ :Any = k.T
lowerCAmelCase_ :Tuple = o.T
lowerCAmelCase_ :Tuple = q.T
lowerCAmelCase_ :str = v.T
# Block i, layer 1 (MLP).
lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = layer_norm
if split_mlp_wi:
lowerCAmelCase_ :List[Any] = wi[0].T
lowerCAmelCase_ :Dict = wi[1].T
else:
lowerCAmelCase_ :int = wi.T
lowerCAmelCase_ :List[str] = wo.T
lowerCAmelCase_ :Tuple = old[
"""encoder/relpos_bias/rel_embedding"""
].T
lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" )
lowerCAmelCase_ :List[Any] = layer_norm
lowerCAmelCase_ :List[str] = k.T
lowerCAmelCase_ :Any = o.T
lowerCAmelCase_ :Any = q.T
lowerCAmelCase_ :Dict = v.T
# Block i, layer 1 (Cross Attention).
lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" )
lowerCAmelCase_ :Optional[int] = layer_norm
lowerCAmelCase_ :str = k.T
lowerCAmelCase_ :Tuple = o.T
lowerCAmelCase_ :Any = q.T
lowerCAmelCase_ :int = v.T
# Block i, layer 2 (MLP).
lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ )
lowerCAmelCase_ :List[Any] = layer_norm
if split_mlp_wi:
lowerCAmelCase_ :Any = wi[0].T
lowerCAmelCase_ :Any = wi[1].T
else:
lowerCAmelCase_ :Tuple = wi.T
lowerCAmelCase_ :List[str] = wo.T
lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""]
lowerCAmelCase_ :Optional[Any] = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T
return new
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowerCAmelCase_ :Any = state_dict["""shared.weight"""]
return state_dict
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = checkpoints.load_tax_checkpoint(lowercase__ )
lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ )
lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ , strict=lowercase__ )
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Any = TaConfig.from_json_file(lowercase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowerCAmelCase_ :List[Any] = TaEncoderModel(lowercase__ )
else:
lowerCAmelCase_ :List[str] = TaForConditionalGeneration(lowercase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowercase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase__ )
print("""Done""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Whether the model is an encoder-only (no decoder) model.', default=False
)
__UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 84 | 1 |
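Most of the heavy lifting in the conversion above is re-laying-out weight matrices: Flax stores dense kernels as (in_features, out_features) while torch.nn.Linear stores (out_features, in_features), hence all the `.T` transposes. A toy sketch of that single step, with invented shapes:

import numpy as np
import torch

kernel = np.arange(12, dtype=np.float32).reshape(4, 3)  # Flax layout: (in=4, out=3)

linear = torch.nn.Linear(4, 3, bias=False)
with torch.no_grad():
    linear.weight.copy_(torch.from_numpy(kernel.T.copy()))  # PyTorch layout: (out, in)

x = torch.ones(1, 4)
expected = torch.from_numpy(np.ones((1, 4), dtype=np.float32) @ kernel)
assert torch.allclose(linear(x), expected)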
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( lowercase__ : Optional[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" )
if "norm" in key:
lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" )
if "layer_norm1" in key:
lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" )
if "attn.q" in key:
lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" )
if "bot_conv" in key:
lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
lowerCAmelCase_ :Any = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" )
lowerCAmelCase_ :List[Any] = value
return new_state_dict
def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ :Optional[Any] = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ :List[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :]
def _snake_case ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return image
@torch.no_grad()
def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int:
'''simple docstring'''
lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor()
# prepare image
lowerCAmelCase_ :List[Any] = prepare_img()
lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) )
# rename keys
lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ )
# key and value matrices need special treatment
read_in_k_v(lowercase__ , lowercase__ )
# create HuggingFace model and load state dict
lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
# forward pass
lowerCAmelCase_ :Dict = model(lowercase__ )
lowerCAmelCase_ :Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase_ :Optional[Any] = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
lowerCAmelCase_ :Any = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
__UpperCAmelCase = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 84 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( lowercase__ : Optional[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" )
if "norm" in key:
lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" )
if "layer_norm1" in key:
lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" )
if "attn.q" in key:
lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" )
if "bot_conv" in key:
lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
lowerCAmelCase_ :Any = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" )
lowerCAmelCase_ :List[Any] = value
return new_state_dict
def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ :Optional[Any] = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ :List[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :]
def _snake_case ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return image
@torch.no_grad()
def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int:
'''simple docstring'''
lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor()
# prepare image
lowerCAmelCase_ :List[Any] = prepare_img()
lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) )
# rename keys
lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ )
# key and value matrices need special treatment
read_in_k_v(lowercase__ , lowercase__ )
# create HuggingFace model and load state dict
lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
# forward pass
lowerCAmelCase_ :Dict = model(lowercase__ )
lowerCAmelCase_ :Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase_ :Optional[Any] = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
lowerCAmelCase_ :Any = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
__UpperCAmelCase = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 84 | 1 |
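The `rename_keys` pass in the GLPN converter above is a chain of ordered substring substitutions plus 1-based to 0-based index shifts such as `block1` to `block.0`. The same pattern in a compact, regex-based sketch (key names invented for illustration):

import re
from collections import OrderedDict

def rename_key(key):
    key = key.replace("module.encoder", "glpn.encoder")
    # shift 1-based suffixes to 0-based dotted indices, e.g. block1 -> block.0
    return re.sub(r"block(\d+)", lambda m: f"block.{int(m.group(1)) - 1}", key)

state_dict = OrderedDict({"module.encoder.block1.attn.q.weight": 0})
renamed = OrderedDict((rename_key(k), v) for k, v in state_dict.items())
assert "glpn.encoder.block.0.attn.q.weight" in renamed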
"""simple docstring"""
__UpperCAmelCase = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__UpperCAmelCase = [{'type': 'code', 'content': INSTALL_CONTENT}]
__UpperCAmelCase = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 84 |
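The mapping above supplies concrete class names for the documentation-notebook placeholders; applying it is plain string replacement, as in this small sketch reusing the same placeholder keys:

replacements = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
}

text = "processor = {processor_class}.from_pretrained(...)"
for placeholder, value in replacements.items():
    text = text.replace(placeholder, value)
assert text == "processor = FakeProcessorClass.from_pretrained(...)"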
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__UpperCAmelCase = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84 | 1 |
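The guarded imports above implement the optional-dependency pattern: probe for a package and only register the heavy submodules when it is present, deferring the rest to a lazy module. Stripped of the transformers machinery, the core of the pattern looks like this sketch:

import importlib.util

_import_structure = {"configuration": ["Config"]}

if importlib.util.find_spec("torch") is not None:
    # the dependency is installed, so register the heavy modeling objects
    _import_structure["modeling"] = ["Model", "PreTrainedModel"]

print(sorted(_import_structure))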
"""simple docstring"""
import datasets
__UpperCAmelCase = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n'
__UpperCAmelCase = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n'
__UpperCAmelCase = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n'
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def __lowerCAmelCase ( self , __A , __A ) -> Dict:
return {"accuracy": simple_accuracy(__A , __A )}
| 84 |
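The XNLI metric above is just simple accuracy. Computed directly with numpy:

import numpy as np

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
accuracy = float((preds == labels).mean())
assert accuracy == 0.75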
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "levit"
def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any:
super().__init__(**__A )
lowerCAmelCase_ :Tuple = image_size
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :Union[str, Any] = kernel_size
lowerCAmelCase_ :Optional[Any] = stride
lowerCAmelCase_ :Optional[int] = padding
lowerCAmelCase_ :Optional[Any] = hidden_sizes
lowerCAmelCase_ :Optional[int] = num_attention_heads
lowerCAmelCase_ :int = depths
lowerCAmelCase_ :List[str] = key_dim
lowerCAmelCase_ :str = drop_path_rate
lowerCAmelCase_ :Optional[int] = patch_size
lowerCAmelCase_ :Union[str, Any] = attention_ratio
lowerCAmelCase_ :Dict = mlp_ratio
lowerCAmelCase_ :Any = initializer_range
lowerCAmelCase_ :Optional[int] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Tuple = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-4
| 84 | 1 |
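The LeViT config above uses a convolutional stem for patch embedding. Assuming the usual LeViT stem of four kernel-3, stride-2, padding-1 convolutions (consistent with the patch_size of 16 in the config), the token-grid arithmetic works out as follows:

def conv_out(size, kernel=3, stride=2, padding=1):
    # standard convolution output-size formula
    return (size + 2 * padding - kernel) // stride + 1

size = 224
for _ in range(4):  # four stride-2 convolutions in the embedding stack
    size = conv_out(size)
assert size == 14  # 224 / 2**4 -> a 14x14 token grid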
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'The `inpainting.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionInpaintPipeline` instead.'
)
| 84 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) )
lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) )
lowerCAmelCase_ :Dict = 0.01
with locka.acquire():
with pytest.raises(lowercase__ ):
lowerCAmelCase_ :List[Any] = time.time()
locka.acquire(lowercase__ )
assert time.time() - _start > timeout
def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock"""
lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(lowercase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
lowerCAmelCase_ :Any = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(lowercase__ ):
locka.acquire(0 )
| 84 | 1 |
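The tests above exercise the two FileLock behaviours that matter in practice: a second acquisition times out while the lock is held, and over-long lock filenames are truncated. Typical usage of the timeout path, sketched against the py-filelock API:

from filelock import FileLock, Timeout

lock_a = FileLock("demo.lock")
lock_b = FileLock("demo.lock")

with lock_a.acquire():
    try:
        lock_b.acquire(timeout=0.01)  # the file is held by lock_a, so this times out
    except Timeout:
        print("lock is busy, backing off")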
"""simple docstring"""
def _snake_case ( lowercase__ : int = 1_0_0_0 ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = 2**power
lowerCAmelCase_ :List[Any] = str(lowercase__ )
lowerCAmelCase_ :Tuple = list(lowercase__ )
lowerCAmelCase_ :Tuple = 0
for i in list_num:
sum_of_num += int(lowercase__ )
return sum_of_num
if __name__ == "__main__":
__UpperCAmelCase = int(input('Enter the power of 2: ').strip())
print('2 ^ ', power, ' = ', 2**power)
__UpperCAmelCase = solution(power)
print('Sum of the digits is: ', result)
| 84 |
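The digit-sum routine above can be written as a one-liner; for the Project Euler case of 2**1000 the answer is 1366:

def digit_sum_of_power_of_two(power):
    return sum(int(d) for d in str(2**power))

assert digit_sum_of_power_of_two(15) == 26      # 2**15 = 32768 -> 3+2+7+6+8
assert digit_sum_of_power_of_two(1000) == 1366  # Project Euler problem 16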
"""simple docstring"""
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
__UpperCAmelCase = 1.054571817e-34 # unit of ℏ : J * s
__UpperCAmelCase = 3e8 # unit of c : m * s^-1
def _snake_case ( lowercase__ : float , lowercase__ : float , lowercase__ : float ) -> dict[str, float]:
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("""One and only one argument must be 0""" )
if force < 0:
raise ValueError("""Magnitude of force can not be negative""" )
if distance < 0:
raise ValueError("""Distance can not be negative""" )
if area < 0:
raise ValueError("""Area can not be negative""" )
if force == 0:
lowerCAmelCase_ :Union[str, Any] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
2_4_0 * (distance) ** 4
)
return {"force": force}
elif area == 0:
lowerCAmelCase_ :Optional[Any] = (2_4_0 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
lowerCAmelCase_ :Any = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (2_4_0 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError("""One and only one argument must be 0""" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 | 1 |
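As a quick sanity check of the formula above, F = (hbar * c * pi^2 * A) / (240 * d^4): for 1 cm^2 plates one micrometre apart the attractive Casimir force comes out on the order of 1.3e-7 N:

from math import pi

REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # J * s
SPEED_OF_LIGHT = 3e8  # m / s

def casimir_force(area, distance):
    return (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
        240 * distance**4
    )

# 1 cm^2 plates, 1 micrometre apart -> roughly 1.3e-7 N
print(casimir_force(area=1e-4, distance=1e-6))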
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , __A , __A=13 , __A=30 , __A=2 , __A=3 , __A=True , __A=True , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=10 , __A=0.0_2 , ) -> List[str]:
lowerCAmelCase_ :Optional[int] = parent
lowerCAmelCase_ :Any = batch_size
lowerCAmelCase_ :str = image_size
lowerCAmelCase_ :Any = patch_size
lowerCAmelCase_ :Optional[Any] = num_channels
lowerCAmelCase_ :str = is_training
lowerCAmelCase_ :int = use_labels
lowerCAmelCase_ :str = hidden_size
lowerCAmelCase_ :str = num_hidden_layers
lowerCAmelCase_ :Optional[int] = num_attention_heads
lowerCAmelCase_ :str = intermediate_size
lowerCAmelCase_ :int = hidden_act
lowerCAmelCase_ :int = hidden_dropout_prob
lowerCAmelCase_ :Any = attention_probs_dropout_prob
lowerCAmelCase_ :Dict = type_sequence_label_size
lowerCAmelCase_ :List[Any] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase_ :Optional[int] = (image_size // patch_size) ** 2
lowerCAmelCase_ :Optional[Any] = num_patches + 1
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ :Tuple = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , )
return config, pixel_values
def __lowerCAmelCase ( self , __A , __A ) -> List[Any]:
lowerCAmelCase_ :List[str] = FlaxViTModel(config=__A )
lowerCAmelCase_ :List[Any] = model(__A )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase_ :List[str] = (self.image_size, self.image_size)
lowerCAmelCase_ :Any = (self.patch_size, self.patch_size)
lowerCAmelCase_ :List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A ) -> List[str]:
lowerCAmelCase_ :List[Any] = self.type_sequence_label_size
lowerCAmelCase_ :List[Any] = FlaxViTForImageClassification(config=__A )
lowerCAmelCase_ :int = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase_ :int = 1
lowerCAmelCase_ :Tuple = FlaxViTForImageClassification(__A )
lowerCAmelCase_ :Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase_ :Tuple = model(__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :str = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = config_and_inputs
lowerCAmelCase_ :Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Optional[int] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def __lowerCAmelCase ( self ) -> None:
lowerCAmelCase_ :Optional[int] = FlaxViTModelTester(self )
lowerCAmelCase_ :Union[str, Any] = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> int:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ :str = model_class(__A )
lowerCAmelCase_ :int = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ :List[Any] = [*signature.parameters.keys()]
lowerCAmelCase_ :Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase_ :Dict = self._prepare_for_class(__A , __A )
lowerCAmelCase_ :Optional[int] = model_class(__A )
@jax.jit
def model_jitted(__A , **__A ):
return model(pixel_values=__A , **__A )
with self.subTest("""JIT Enabled""" ):
lowerCAmelCase_ :Optional[int] = model_jitted(**__A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowerCAmelCase_ :Tuple = model_jitted(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) )
for jitted_output, output in zip(__A , __A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
lowerCAmelCase_ :Dict = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
lowerCAmelCase_ :str = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(__A )
| 84 |
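The sequence length used in the Flax ViT tester above follows directly from the patching arithmetic: (image_size // patch_size)**2 patches plus one [CLS] token. As a standalone check:

def vit_seq_length(image_size, patch_size):
    num_patches = (image_size // patch_size) ** 2
    return num_patches + 1  # +1 for the [CLS] token

assert vit_seq_length(224, 16) == 197  # standard ViT-Base/16
assert vit_seq_length(30, 2) == 226    # the tiny config used in the tester above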
"""simple docstring"""
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> int:
'''simple docstring'''
if len(lowercase__ ) != len(lowercase__ ):
raise ValueError("""String lengths must match!""" )
lowerCAmelCase_ :Optional[int] = 0
for char_a, char_b in zip(lowercase__ , lowercase__ ):
if char_a != char_b:
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 84 | 1 |
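A classic sanity check for the Hamming-distance routine above, written out cleanly:

def hamming_distance(a, b):
    if len(a) != len(b):
        raise ValueError("String lengths must match!")
    return sum(ch_a != ch_b for ch_a, ch_b in zip(a, b))

assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("10101", "10011") == 2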
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
__UpperCAmelCase = logging.getLogger(__name__)
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=lowercase__ , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=lowercase__ , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=lowercase__ , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=lowercase__ , default="""data/dump""" , help="""The dump file prefix.""" )
lowerCAmelCase_ :Any = parser.parse_args()
logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
lowerCAmelCase_ :Tuple = BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase_ :Optional[int] = tokenizer.special_tokens_map["""cls_token"""] # `[CLS]`
lowerCAmelCase_ :Optional[Any] = tokenizer.special_tokens_map["""sep_token"""] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCAmelCase_ :Any = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase_ :str = tokenizer.special_tokens_map["""cls_token"""] # `<s>`
lowerCAmelCase_ :Dict = tokenizer.special_tokens_map["""sep_token"""] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCAmelCase_ :Optional[int] = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCAmelCase_ :Any = tokenizer.special_tokens_map["""bos_token"""] # `<|endoftext|>`
lowerCAmelCase_ :Dict = tokenizer.special_tokens_map["""eos_token"""] # `<|endoftext|>`
logger.info(f"""Loading text from {args.file_path}""" )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
lowerCAmelCase_ :List[Any] = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f"""{len(lowercase__ )} examples to process.""" )
lowerCAmelCase_ :List[str] = []
lowerCAmelCase_ :str = 0
lowerCAmelCase_ :Tuple = 1_0_0_0_0
lowerCAmelCase_ :Union[str, Any] = time.time()
for text in data:
lowerCAmelCase_ :Any = f"""{bos} {text.strip()} {sep}"""
lowerCAmelCase_ :List[Any] = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
rslt.append(lowercase__ )
iter += 1
if iter % interval == 0:
lowerCAmelCase_ :Optional[Any] = time.time()
logger.info(f"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
lowerCAmelCase_ :int = time.time()
logger.info("""Finished binarization""" )
logger.info(f"""{len(lowercase__ )} examples processed.""" )
lowerCAmelCase_ :Optional[int] = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
lowerCAmelCase_ :Dict = tokenizer.vocab_size
if vocab_size < (1 << 1_6):
lowerCAmelCase_ :Any = [np.uintaa(lowercase__ ) for d in rslt]
else:
lowerCAmelCase_ :str = [np.intaa(lowercase__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f"""Dump to {dp_file}""" )
with open(lowercase__ , """wb""" ) as handle:
pickle.dump(rslt_ , lowercase__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 84 |
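The dtype choice at the end of the binarization script above, uint16 when the vocabulary fits in 16 bits and int32 otherwise, roughly halves the pickle size for BERT-sized vocabularies. In isolation:

import numpy as np

def ids_dtype(vocab_size):
    # token ids fit in two bytes iff the vocabulary is smaller than 2**16
    return np.uint16 if vocab_size < (1 << 16) else np.int32

assert ids_dtype(30_522) is np.uint16   # BERT-sized vocabulary
assert ids_dtype(250_000) is np.int32   # large multilingual vocabularies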
"""simple docstring"""
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> Optional[Any]:
super().__init__()
lowerCAmelCase_ :int = nn.ModuleList(__A )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ):
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = controlnet(
__A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , )
# merge samples
if i == 0:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = down_samples, mid_sample
else:
lowerCAmelCase_ :str = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(__A , __A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Optional[Any]:
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
__A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , )
idx += 1
lowerCAmelCase_ :Any = model_path_to_save + f"""_{idx}"""
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> List[Any]:
lowerCAmelCase_ :int = 0
lowerCAmelCase_ :Dict = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
lowerCAmelCase_ :List[Any] = pretrained_model_path
while os.path.isdir(__A ):
lowerCAmelCase_ :Tuple = ControlNetModel.from_pretrained(__A , **__A )
controlnets.append(__A )
idx += 1
lowerCAmelCase_ :Dict = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" )
if len(__A ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" )
return cls(__A )
| 84 | 1 |
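The merge step in the MultiControlNet wrapper above is an elementwise sum of per-block residual lists across controlnets. Reduced to bare tensors, the operation is just this sketch:

import torch

# two controlnets, each producing residuals for two down blocks
down_a = [torch.ones(1, 4), torch.ones(1, 4)]
down_b = [torch.full((1, 4), 2.0), torch.full((1, 4), 2.0)]

merged = [prev + curr for prev, curr in zip(down_a, down_b)]
assert all(torch.equal(t, torch.full((1, 4), 3.0)) for t in merged)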
"""simple docstring"""
from __future__ import annotations
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = text, pattern
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = len(__A ), len(__A )
def __lowerCAmelCase ( self , __A ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __lowerCAmelCase ( self , __A ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __lowerCAmelCase ( self ) -> list[int]:
# searches pattern in text and returns index positions
lowerCAmelCase_ :List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCAmelCase_ :Any = self.mismatch_in_text(__A )
if mismatch_index == -1:
positions.append(__A )
else:
lowerCAmelCase_ :int = self.match_in_pattern(self.text[mismatch_index] )
lowerCAmelCase_ :Any = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__UpperCAmelCase = 'ABAABA'
__UpperCAmelCase = 'AB'
__UpperCAmelCase = BoyerMooreSearch(text, pattern)
__UpperCAmelCase = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 84 |
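The per-mismatch scan above recomputes, for each mismatching character, its last occurrence in the pattern. Classic Boyer-Moore precomputes this once as a bad-character table:

def bad_character_table(pattern):
    # last index at which each character occurs in the pattern
    return {ch: i for i, ch in enumerate(pattern)}

table = bad_character_table("AB")
assert table == {"A": 0, "B": 1}
# on a mismatch at text position j with character c, shift by j - table.get(c, -1)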
"""simple docstring"""
from PIL import Image
def _snake_case ( lowercase__ : Image , lowercase__ : float ) -> Image:
'''simple docstring'''
def brightness(lowercase__ : int ) -> float:
return 1_2_8 + level + (c - 1_2_8)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(lowercase__ )
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
__UpperCAmelCase = change_brightness(img, 1_00)
brigt_img.save('image_data/lena_brightness.png', format='png')
| 84 | 1 |
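One caveat with the brightness mapping above: for 8-bit images the per-channel result should stay in 0..255, so a clamped variant is usually safer. A sketch (the function name is illustrative):

from PIL import Image

def change_brightness_clamped(img, level):
    def brightness(c):
        # clamp so every channel stays a valid 8-bit value
        return max(0, min(255, int(c + level)))
    return img.point(brightness)

# usage: change_brightness_clamped(Image.open("lena.jpg"), 100).save("out.png")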
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__UpperCAmelCase = (7_20, 12_80) # Height, Width
__UpperCAmelCase = (0.4, 0.6) # if height or width is lower than this scale, drop it.
__UpperCAmelCase = 1 / 1_00
__UpperCAmelCase = ''
__UpperCAmelCase = ''
__UpperCAmelCase = ''
__UpperCAmelCase = 2_50
def _snake_case ( ) -> None:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = get_dataset(lowercase__ , lowercase__ )
for index in range(lowercase__ ):
lowerCAmelCase_ :Union[str, Any] = random.sample(range(len(lowercase__ ) ) , 4 )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[str] = update_image_and_anno(
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , filter_scale=lowercase__ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase_ :Any = random_chars(3_2 )
lowerCAmelCase_ :Any = path.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
lowerCAmelCase_ :str = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
cva.imwrite(f"""{file_root}.jpg""" , lowercase__ , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""" )
lowerCAmelCase_ :Union[str, Any] = []
for anno in new_annos:
lowerCAmelCase_ :str = anno[3] - anno[1]
lowerCAmelCase_ :Union[str, Any] = anno[4] - anno[2]
lowerCAmelCase_ :Optional[Any] = anno[1] + width / 2
lowerCAmelCase_ :Any = anno[2] + height / 2
lowerCAmelCase_ :Optional[Any] = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
annos_list.append(lowercase__ )
with open(f"""{file_root}.txt""" , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def _snake_case ( lowercase__ : str , lowercase__ : str ) -> tuple[list, list]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = []
lowerCAmelCase_ :Optional[int] = []
for label_file in glob.glob(os.path.join(lowercase__ , """*.txt""" ) ):
lowerCAmelCase_ :Any = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(lowercase__ ) as in_file:
lowerCAmelCase_ :Any = in_file.readlines()
lowerCAmelCase_ :Union[str, Any] = os.path.join(lowercase__ , f"""{label_name}.jpg""" )
lowerCAmelCase_ :List[Any] = []
for obj_list in obj_lists:
lowerCAmelCase_ :Any = obj_list.rstrip("""\n""" ).split(""" """ )
lowerCAmelCase_ :str = float(obj[1] ) - float(obj[3] ) / 2
lowerCAmelCase_ :Dict = float(obj[2] ) - float(obj[4] ) / 2
lowerCAmelCase_ :Union[str, Any] = float(obj[1] ) + float(obj[3] ) / 2
lowerCAmelCase_ :List[str] = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(lowercase__ )
labels.append(lowercase__ )
return img_paths, labels
def _snake_case ( lowercase__ : list , lowercase__ : list , lowercase__ : list[int] , lowercase__ : tuple[int, int] , lowercase__ : tuple[float, float] , lowercase__ : float = 0.0 , ) -> tuple[list, list, str]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowerCAmelCase_ :List[Any] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase_ :Tuple = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase_ :Union[str, Any] = int(scale_x * output_size[1] )
lowerCAmelCase_ :Tuple = int(scale_y * output_size[0] )
lowerCAmelCase_ :Dict = []
lowerCAmelCase_ :Optional[Any] = []
for i, index in enumerate(lowercase__ ):
lowerCAmelCase_ :int = all_img_list[index]
path_list.append(lowercase__ )
lowerCAmelCase_ :Tuple = all_annos[index]
lowerCAmelCase_ :Optional[Any] = cva.imread(lowercase__ )
if i == 0: # top-left
lowerCAmelCase_ :Optional[int] = cva.resize(lowercase__ , (divid_point_x, divid_point_y) )
lowerCAmelCase_ :Optional[int] = img
for bbox in img_annos:
lowerCAmelCase_ :Union[str, Any] = bbox[1] * scale_x
lowerCAmelCase_ :Any = bbox[2] * scale_y
lowerCAmelCase_ :str = bbox[3] * scale_x
lowerCAmelCase_ :Optional[int] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowerCAmelCase_ :List[Any] = cva.resize(lowercase__ , (output_size[1] - divid_point_x, divid_point_y) )
lowerCAmelCase_ :Dict = img
for bbox in img_annos:
lowerCAmelCase_ :int = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase_ :Optional[Any] = bbox[2] * scale_y
lowerCAmelCase_ :Any = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase_ :List[Any] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowerCAmelCase_ :Any = cva.resize(lowercase__ , (divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase_ :Union[str, Any] = img
for bbox in img_annos:
lowerCAmelCase_ :Tuple = bbox[1] * scale_x
lowerCAmelCase_ :Dict = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase_ :Optional[Any] = bbox[3] * scale_x
lowerCAmelCase_ :Tuple = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowerCAmelCase_ :Tuple = cva.resize(
lowercase__ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase_ :Union[str, Any] = img
for bbox in img_annos:
lowerCAmelCase_ :str = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase_ :Any = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase_ :Any = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase_ :str = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowerCAmelCase_ :Any = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _snake_case ( lowercase__ : int ) -> str:
'''simple docstring'''
assert number_char > 1, "The number of characters should be greater than 1"
lowerCAmelCase_ :Optional[int] = ascii_lowercase + digits
return "".join(random.choice(lowercase__ ) for _ in range(lowercase__ ) )
if __name__ == "__main__":
main()
print('DONE ✅')
| 84 |
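The annotation handling in the mosaic script above converts between the YOLO box format (x_center, y_center, width, height, all normalized) and corner coordinates. That round trip in isolation:

def yolo_to_corners(xc, yc, w, h):
    return xc - w / 2, yc - h / 2, xc + w / 2, yc + h / 2

def corners_to_yolo(xmin, ymin, xmax, ymax):
    return (xmin + xmax) / 2, (ymin + ymax) / 2, xmax - xmin, ymax - ymin

box = (0.5, 0.5, 0.25, 0.5)  # values chosen to be exact in binary floating point
assert corners_to_yolo(*yolo_to_corners(*box)) == box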
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class _SCREAMING_SNAKE_CASE :
def __lowerCAmelCase ( self ) -> Tuple:
torch.manual_seed(0 )
lowerCAmelCase_ :int = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase_ :List[Any] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase_ :str = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=__A , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )
torch.manual_seed(0 )
lowerCAmelCase_ :int = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __lowerCAmelCase ( self ) -> List[str]:
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = TaEncoderModel.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[Any] = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"""ResnetDownsampleBlock2D""",
"""SimpleCrossAttnDownBlock2D""",
] , mid_block_type="""UNetMidBlock2DSimpleCrossAttn""" , up_block_types=["""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="""text""" , addition_embed_type_num_heads=2 , cross_attention_norm="""group_norm""" , resnet_time_scale_shift="""scale_shift""" , act_fn="""gelu""" , class_embed_type="""timestep""" , mid_block_scale_factor=1.4_1_4 , time_embedding_act_fn="""gelu""" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
lowerCAmelCase_ :str = DDPMScheduler(
            num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , thresholding=True , dynamic_thresholding_ratio=0.9_5 , sample_max_value=1.0 , prediction_type="""epsilon""" , variance_type="""learned_range""" , )  # thresholding=True restored; the placeholder here was undefined (True matches the upstream IF test setup)
torch.manual_seed(0 )
lowerCAmelCase_ :Optional[int] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="""squaredcos_cap_v2""" , beta_start=0.0_0_0_1 , beta_end=0.0_2 , )
torch.manual_seed(0 )
lowerCAmelCase_ :Dict = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Dict = self.get_dummy_components()
lowerCAmelCase_ :Tuple = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Any = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Optional[int] = inputs["""prompt"""]
lowerCAmelCase_ :Optional[int] = inputs["""generator"""]
lowerCAmelCase_ :Any = inputs["""num_inference_steps"""]
lowerCAmelCase_ :Optional[int] = inputs["""output_type"""]
if "image" in inputs:
lowerCAmelCase_ :List[Any] = inputs["""image"""]
else:
lowerCAmelCase_ :int = None
if "mask_image" in inputs:
lowerCAmelCase_ :List[Any] = inputs["""mask_image"""]
else:
lowerCAmelCase_ :int = None
if "original_image" in inputs:
lowerCAmelCase_ :List[Any] = inputs["""original_image"""]
else:
lowerCAmelCase_ :List[Any] = None
lowerCAmelCase_ , lowerCAmelCase_ :int = pipe.encode_prompt(__A )
# inputs with prompt converted to embeddings
lowerCAmelCase_ :List[str] = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCAmelCase_ :int = image
if mask_image is not None:
lowerCAmelCase_ :Tuple = mask_image
if original_image is not None:
lowerCAmelCase_ :Optional[Any] = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(__A , __A , __A )
lowerCAmelCase_ :Optional[int] = pipe(**__A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__A )
lowerCAmelCase_ :Optional[int] = self.pipeline_class.from_pretrained(__A )
pipe_loaded.to(__A )
pipe_loaded.set_progress_bar_config(disable=__A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__A , __A ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
lowerCAmelCase_ :Dict = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Union[str, Any] = inputs["""generator"""]
lowerCAmelCase_ :Any = inputs["""num_inference_steps"""]
lowerCAmelCase_ :Tuple = inputs["""output_type"""]
# inputs with prompt converted to embeddings
lowerCAmelCase_ :Tuple = {
"""prompt_embeds""": prompt_embeds,
"""negative_prompt_embeds""": negative_prompt_embeds,
"""generator""": generator,
"""num_inference_steps""": num_inference_steps,
"""output_type""": output_type,
}
if image is not None:
lowerCAmelCase_ :Optional[int] = image
if mask_image is not None:
lowerCAmelCase_ :str = mask_image
if original_image is not None:
lowerCAmelCase_ :Tuple = original_image
lowerCAmelCase_ :Union[str, Any] = pipe_loaded(**__A )[0]
lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max()
self.assertLess(__A , 1E-4 )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Any = self.get_dummy_components()
lowerCAmelCase_ :Optional[int] = self.pipeline_class(**__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[int] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :Dict = pipe(**__A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__A )
lowerCAmelCase_ :Any = self.pipeline_class.from_pretrained(__A )
pipe_loaded.to(__A )
pipe_loaded.set_progress_bar_config(disable=__A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
lowerCAmelCase_ :List[Any] = self.get_dummy_inputs(__A )
lowerCAmelCase_ :str = pipe_loaded(**__A )[0]
lowerCAmelCase_ :Dict = np.abs(to_np(__A ) - to_np(__A ) ).max()
self.assertLess(__A , 1E-4 )
| 84 | 1 |
"""simple docstring"""
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    """Return the sum of the factorials of the digits of ``n``."""
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    """Return the sum of all numbers that equal the sum of the factorials of their digits."""
    limit = 7 * factorial(9) + 1  # no such number can have 8 or more digits
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
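# --- added illustration (not part of the original solution) ---
# Why the 7 * factorial(9) + 1 search limit is safe: an n-digit number is at
# least 10 ** (n - 1), while its digit-factorial sum is at most n * 9!.
# The inequality 10 ** (n - 1) > n * 362880 first holds at n = 8, so no
# candidate can have eight or more digits.
def _smallest_impossible_digit_count() -> int:
    n = 1
    while 10 ** (n - 1) <= n * 362880:
        n += 1
    return n  # -> 8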
if __name__ == "__main__":
print(F"""{solution() = }""")
| 84 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2""" , revision="""bf16""" , dtype=jnp.bfloataa , )
lowerCAmelCase_ :int = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :List[Any] = jax.device_count()
lowerCAmelCase_ :Optional[Any] = num_samples * [prompt]
lowerCAmelCase_ :int = sd_pipe.prepare_inputs(__A )
lowerCAmelCase_ :Optional[Any] = replicate(__A )
lowerCAmelCase_ :Union[str, Any] = shard(__A )
lowerCAmelCase_ :Optional[Any] = jax.random.PRNGKey(0 )
lowerCAmelCase_ :Tuple = jax.random.split(__A , jax.device_count() )
lowerCAmelCase_ :Union[str, Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCAmelCase_ :Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1]
lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ :Optional[int] = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Union[str, Any] = """stabilityai/stable-diffusion-2"""
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(__A , subfolder="""scheduler""" )
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = FlaxStableDiffusionPipeline.from_pretrained(
            __A , scheduler=__A , revision="""bf16""" , dtype=jnp.bfloat16 , )
lowerCAmelCase_ :Optional[int] = scheduler_params
lowerCAmelCase_ :List[Any] = """A painting of a squirrel eating a burger"""
lowerCAmelCase_ :Tuple = jax.device_count()
lowerCAmelCase_ :str = num_samples * [prompt]
lowerCAmelCase_ :Union[str, Any] = sd_pipe.prepare_inputs(__A )
lowerCAmelCase_ :Tuple = replicate(__A )
lowerCAmelCase_ :Optional[int] = shard(__A )
lowerCAmelCase_ :List[str] = jax.random.PRNGKey(0 )
lowerCAmelCase_ :List[Any] = jax.random.split(__A , jax.device_count() )
lowerCAmelCase_ :Optional[Any] = sd_pipe(__A , __A , __A , num_inference_steps=25 , jit=__A )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
lowerCAmelCase_ :List[str] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase_ :List[str] = images[0, 253:256, 253:256, -1]
lowerCAmelCase_ :Optional[int] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase_ :Dict = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
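# --- added sketch (hedged; not part of the original test file) ---
# The replicate/shard pair used above is the standard pmap data layout:
# parameters are copied to every device while inputs gain a leading device
# axis. This sketch only reshapes host arrays, assumes a single host
# (device_count == local_device_count), and uses an arbitrary sequence length.
if is_flax_available():
    def _shard_demo() -> None:
        import numpy as np
        n = jax.device_count()
        batch = np.zeros((n * 2, 77), dtype=np.int32)  # (global_batch, seq_len)
        sharded = shard(batch)  # -> (n, 2, 77), one leading slice per device
        assert sharded.shape == (n, 2, 77)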
| 84 | 1 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores
    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)
    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))
    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
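# --- added usage sketch (hedged; not part of the original test file) ---
# The criteria exercised above compose the same way inside `generate`: each
# criterion is a callable over (input_ids, scores) returning a bool, and a
# StoppingCriteriaList stops decoding as soon as any member fires. The model
# and inputs below are placeholders for illustration only.
if is_torch_available():
    def _sketch_generate_with_criteria(model, input_ids):
        criteria = StoppingCriteriaList(
            [MaxLengthCriteria(max_length=50), MaxTimeCriteria(max_time=2.0)]
        )
        return model.generate(input_ids, stopping_criteria=criteria)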
| 84 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Infinite incremental sieve: map each known composite to a prime factor."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
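# --- added illustration (not part of the original file) ---
# The incremental sieve above maps each known composite to one of its prime
# factors; popping a hit advances that factor to its next free multiple, so
# memory grows with the number of primes seen rather than with a fixed limit.
from itertools import islice
def _first_primes(k: int) -> list[int]:
    return list(islice(sieve(), k))  # _first_primes(5) -> [2, 3, 5, 7, 11]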
def solution(limit: float = 1E10) -> int:
    """Return the least odd n for which the remainder 2 * p(n) * n exceeds ``limit``."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 84 | 1 |
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way ``target`` can be built by concatenating words from ``word_bank``."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i + len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
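# --- added note (hedged; not part of the original file) ---
# The table-based construction above enumerates every decomposition, so its
# output (and runtime) can grow exponentially in len(target). A count-only
# variant of the same recurrence stays polynomial; this sketch tracks counts
# instead of lists.
def count_construct(target: str, word_bank: list[str]) -> int:
    table = [0] * (len(target) + 1)
    table[0] = 1  # the empty string has exactly one construction
    for i in range(len(target)):
        if table[i]:
            for word in word_bank:
                if target[i : i + len(word)] == word:
                    table[i + len(word)] += table[i]
    return table[len(target)]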
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 84 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # TODO: is there an appropriate internal test set?
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"
def __lowerCAmelCase ( self , __A=0 ) -> Optional[int]:
lowerCAmelCase_ :Optional[Any] = floats_tensor((1, 3, 128, 128) , rng=random.Random(__A ) )
lowerCAmelCase_ :List[Any] = torch.manual_seed(__A )
lowerCAmelCase_ :Tuple = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Optional[Any] = self.get_dummy_inputs()
lowerCAmelCase_ :Dict = pipe(**__A ).images
lowerCAmelCase_ :Any = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :int = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCAmelCase_ :Tuple = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__A )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :int = self.get_dummy_inputs()
lowerCAmelCase_ :List[str] = pipe(**__A ).images
lowerCAmelCase_ :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :str = np.array(
[0.6_8_9_8_8_9_2, 0.5_9_2_4_0_5_5_6, 0.5_2_4_9_9_5_2_7, 0.5_8_8_6_6_2_1_5, 0.5_2_2_5_8_2_3_5, 0.5_2_5_7_2_7_1_5, 0.6_2_4_1_4_4_7_3, 0.6_1_7_4_3_8_7, 0.6_2_1_4_9_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCAmelCase_ :Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[Any] = self.get_dummy_inputs()
lowerCAmelCase_ :Union[str, Any] = pipe(**__A ).images
lowerCAmelCase_ :Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Tuple = np.array(
[0.7_6_5_9_2_7_8, 0.7_6_4_3_7_6_6_4, 0.7_5_5_7_9_1_0_7, 0.7_6_9_1_1_1_6, 0.7_7_6_6_6_9_8_6, 0.7_7_2_7_6_7_2, 0.7_7_5_8_6_6_4, 0.7_8_1_2_2_2_6, 0.7_6_9_4_2_5_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Union[str, Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCAmelCase_ :Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Union[str, Any] = self.get_dummy_inputs()
lowerCAmelCase_ :Optional[Any] = pipe(**__A ).images
lowerCAmelCase_ :Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Tuple = np.array(
[0.6_9_7_4_7_8_2, 0.6_8_9_0_2_0_9_3, 0.7_0_1_3_5_8_8_5, 0.7_5_8_3_6_1_8, 0.7_8_0_4_5_4_5, 0.7_8_5_4_9_1_2, 0.7_8_6_6_7_4_2_6, 0.7_8_7_4_3_8_6_3, 0.7_8_0_7_0_2_2_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
lowerCAmelCase_ :Optional[int] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :List[Any] = self.get_dummy_inputs()
lowerCAmelCase_ :Dict = pipe(**__A ).images
lowerCAmelCase_ :Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Dict = np.array(
[0.7_7_4_2_4_4_9_6, 0.7_7_3_6_0_1, 0.7_6_4_5_2_8_8, 0.7_7_6_9_5_9_8, 0.7_7_7_2_7_3_9, 0.7_7_3_8_6_8_8, 0.7_8_1_8_7_2_3_3, 0.7_7_8_7_9_5_8_4, 0.7_6_7_0_4_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@property
def __lowerCAmelCase ( self ) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Optional[int] = ort.SessionOptions()
lowerCAmelCase_ :Dict = False
return options
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowerCAmelCase_ :Optional[Any] = init_image.resize((128, 128) )
# using the PNDM scheduler by default
lowerCAmelCase_ :Tuple = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Union[str, Any] = """A fantasy landscape, trending on artstation"""
lowerCAmelCase_ :List[Any] = torch.manual_seed(0 )
lowerCAmelCase_ :str = pipe(
prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=10 , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :Dict = output.images
lowerCAmelCase_ :List[str] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Optional[Any] = np.array([0.4_8_8_3, 0.4_9_4_7, 0.4_9_8_0, 0.4_9_7_5, 0.4_9_8_2, 0.4_9_8_0, 0.5_0_0_0, 0.5_0_0_6, 0.4_9_7_2] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowerCAmelCase_ :List[str] = init_image.resize((128, 128) )
lowerCAmelCase_ :Any = LMSDiscreteScheduler.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , subfolder="""scheduler""" )
lowerCAmelCase_ :Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"""ssube/stable-diffusion-x4-upscaler-onnx""" , scheduler=__A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__A )
lowerCAmelCase_ :Any = """A fantasy landscape, trending on artstation"""
lowerCAmelCase_ :Optional[Any] = torch.manual_seed(0 )
lowerCAmelCase_ :List[str] = pipe(
prompt=__A , image=__A , guidance_scale=7.5 , num_inference_steps=20 , generator=__A , output_type="""np""" , )
lowerCAmelCase_ :int = output.images
lowerCAmelCase_ :List[Any] = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
lowerCAmelCase_ :Union[str, Any] = np.array(
[0.5_0_1_7_3_7_5_3, 0.5_0_2_2_3_3_5_6, 0.5_0_2_0_3_9, 0.5_0_2_3_3_0_3_6, 0.5_0_2_3_7_2_5, 0.5_0_2_2_6_0_1, 0.5_0_1_8_7_5_8, 0.5_0_2_3_4_0_8_5, 0.5_0_2_4_1_5_6_6] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 84 | 1 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def __lowerCAmelCase ( self ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase_ :Optional[int] = XLNetTokenizer(__A , keep_accents=__A )
tokenizer.sanitize_special_tokens()
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = """<s>"""
lowerCAmelCase_ :int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<unk>""" )
self.assertEqual(vocab_keys[1] , """<s>""" )
self.assertEqual(vocab_keys[-1] , """<eod>""" )
self.assertEqual(len(__A ) , 1006 )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Any = XLNetTokenizer(__A , keep_accents=__A )
lowerCAmelCase_ :Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] )
lowerCAmelCase_ :Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCAmelCase_ :int = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] )
lowerCAmelCase_ :List[str] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Tuple = XLNetTokenizer(__A , do_lower_case=__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""▁he""", """ll""", """o"""] )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[Any] = XLNetTokenizer(__A , do_lower_case=__A )
lowerCAmelCase_ :List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[Any] = XLNetTokenizer.from_pretrained("""xlnet-base-cased""" )
lowerCAmelCase_ :Dict = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
lowerCAmelCase_ :int = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
lowerCAmelCase_ :int = tokenizer.build_inputs_with_special_tokens(__A )
lowerCAmelCase_ :Optional[Any] = tokenizer.build_inputs_with_special_tokens(__A , __A )
assert encoded_sentence == text + [4, 3]
assert encoded_pair == text + [4] + text_a + [4, 3]
@slow
def __lowerCAmelCase ( self ) -> Dict:
# fmt: off
lowerCAmelCase_ :Union[str, Any] = {"""input_ids""": [[17, 2_1442, 270, 17, 10, 1_4645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 2_2018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 1_4431, 13, 5500, 11, 1176, 580, 13, 1_6819, 4797, 23, 17, 10, 1_7135, 658, 19, 457, 7932, 13, 184, 19, 3154, 1_7135, 6468, 19, 1404, 1_2269, 19, 4229, 5356, 1_6264, 46, 19, 17, 2_0545, 1_0395, 9, 9, 9, 11, 28, 6421, 9531, 2_0729, 17, 10, 353, 1_7022, 11, 21, 6421, 9531, 1_6949, 17, 10, 1_1509, 753, 11, 33, 95, 2421, 7385, 956, 1_4431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 2_4738, 19, 1_3203, 658, 218, 787, 21, 430, 1_8482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 2_2178, 27, 1064, 22, 956, 13, 1_1101, 1429, 5854, 2_4313, 1_8953, 40, 422, 2_4366, 68, 1758, 37, 1_0483, 1_4257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 1_3894, 3380, 23, 95, 18, 1_7634, 2288, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
| 84 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Dict:
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__A , )
assert hasattr(self , """env""" )
def __lowerCAmelCase ( self , __A ) -> Any:
# configuration for running training on smdistributed Model Parallel
lowerCAmelCase_ :Union[str, Any] = {
"""enabled""": True,
"""processes_per_host""": 8,
}
lowerCAmelCase_ :Tuple = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
lowerCAmelCase_ :Any = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
lowerCAmelCase_ :Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=__A , instance_type=self.instance_type , debugger_hook_config=__A , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=__A , py_version="""py36""" , )
def __lowerCAmelCase ( self , __A ) -> List[Any]:
TrainingJobAnalytics(__A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __lowerCAmelCase ( self , __A ) -> List[str]:
# create estimator
lowerCAmelCase_ :Any = self.create_estimator(__A )
# run training
estimator.fit()
# result dataframe
lowerCAmelCase_ :Optional[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCAmelCase_ :List[str] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
lowerCAmelCase_ :Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCAmelCase_ :Optional[int] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __A )
| 84 | 1 |
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'tab': ord('\t'),
'newline': ord('\r'),
'esc': 27,
'up': 65 + ARROW_KEY_FLAG,
'down': 66 + ARROW_KEY_FLAG,
'right': 67 + ARROW_KEY_FLAG,
'left': 68 + ARROW_KEY_FLAG,
'mod_int': 91,
'undefined': sys.maxsize,
'interrupt': 3,
'insert': 50,
'delete': 51,
'pg_up': 53,
'pg_down': 54,
}
KEYMAP['arrow_begin'] = KEYMAP['up']
KEYMAP['arrow_end'] = KEYMAP['left']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'\xe0H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\x00H': KEYMAP['up'] - ARROW_KEY_FLAG,
B'\xe0P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\x00P': KEYMAP['down'] - ARROW_KEY_FLAG,
B'\xe0M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\x00M': KEYMAP['right'] - ARROW_KEY_FLAG,
B'\xe0K': KEYMAP['left'] - ARROW_KEY_FLAG,
B'\x00K': KEYMAP['left'] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Get raw characters from stdin (Windows console or POSIX terminal)."""
    if os.name == "nt":
        import msvcrt
        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP['mod_int']))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP['insert'] - 1 << 9,
                        KEYMAP['delete'] - 1 << 9,
                        KEYMAP['pg_up'] - 1 << 9,
                        KEYMAP['pg_down'] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP['esc'])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    """Return a printable character, an arrow key code, or KEYMAP['undefined']."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP['interrupt'], KEYMAP['newline']]:
        return char
    elif ord(char) == KEYMAP['esc']:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP['mod_int']:
            key = get_raw_chars()
            if ord(key) >= KEYMAP['arrow_begin'] - ARROW_KEY_FLAG and ord(key) <= KEYMAP['arrow_end'] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP['undefined']
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP['undefined']
| 84 |
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last ``n`` digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("""Invalid input""")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)
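# --- added illustration (not part of the original solution) ---
# Three-argument pow reduces modulo 10**n at every squaring step, so the
# 2.3-million-digit value 2**7830457 is never materialised. A small, fast
# sanity check of the same identity:
def _mod_pow_sanity() -> None:
    assert pow(7, 123, 10**4) == (7**123) % 10**4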
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(10) = }""")
| 84 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))
        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])
        dc.reset()
        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])
        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])
        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
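# --- added usage sketch (hedged; not part of the original test file) ---
# In generation, a DisjunctiveConstraint is satisfied once any one of its
# candidate sequences has been produced; constrained beam search consumes it
# via the `constraints` argument of `generate` (which requires num_beams > 1).
# The model and inputs below are placeholders for illustration only.
if is_torch_available():
    def _sketch_constrained_generation(model, input_ids):
        constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
        return model.generate(input_ids, constraints=[constraint], num_beams=4)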
| 84 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Find the text between ``start_prompt`` and ``end_prompt``; return it with splice indices."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    # Now find the end prompt.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    # Trim empty lines on both ends.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return the markdown list of models supporting the task documented in ``task_guide``."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Sync the auto-generated model list in ``task_guide``, or raise if it is stale."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 84 | 1 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = R'\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class _SCREAMING_SNAKE_CASE :
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__( self , __A , __A ) -> jnp.ndarray:
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _SCREAMING_SNAKE_CASE :
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__( self , __A , __A ) -> jnp.ndarray:
raise NotImplementedError(
f"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class _SCREAMING_SNAKE_CASE ( A__ ):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
def __call__( self , __A , __A , __A , **__A ) -> jnp.ndarray:
for processor in self:
lowerCAmelCase_ :Any = inspect.signature(processor.__call__ ).parameters
if len(__A ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f"""Make sure that all the required parameters: {list(function_args.keys() )} for """
f"""{processor.__class__} are passed to the logits processor.""" )
lowerCAmelCase_ :str = processor(__A , __A , __A , **__A )
else:
lowerCAmelCase_ :Tuple = processor(__A , __A , __A )
return scores
class _SCREAMING_SNAKE_CASE ( A__ ):
    def __init__(self, temperature):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f"""`temperature` has to be a strictly positive float, but is {temperature}""")
        self.temperature = temperature
    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
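# --- added illustration (hedged; not part of the original file) ---
# Dividing logits by a temperature below 1.0 sharpens the softmax while a
# value above 1.0 flattens it; the relative ordering of tokens is unchanged.
def _temperature_demo() -> None:
    scores = jnp.array([[2.0, 1.0, 0.5]])
    sharp = jax.nn.softmax(scores / 0.5, axis=-1)
    flat = jax.nn.softmax(scores / 2.0, axis=-1)
    assert float(sharp[0, 0]) > float(flat[0, 0])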
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A = -float("""Inf""" ) , __A = 1 ) -> Optional[Any]:
if not isinstance(__A , __A ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
if not isinstance(__A , __A ) or (min_tokens_to_keep < 1):
raise ValueError(f"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
lowerCAmelCase_ :Optional[int] = top_p
lowerCAmelCase_ :Tuple = filter_value
lowerCAmelCase_ :Tuple = min_tokens_to_keep
def __call__( self , __A , __A , __A ) -> jnp.ndarray:
lowerCAmelCase_ , lowerCAmelCase_ :Tuple = lax.top_k(__A , scores.shape[-1] )
lowerCAmelCase_ :List[Any] = jnp.full_like(__A , self.filter_value )
lowerCAmelCase_ :Dict = jax.nn.softmax(__A , axis=-1 ).cumsum(axis=-1 )
lowerCAmelCase_ :int = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
lowerCAmelCase_ :Union[str, Any] = jnp.roll(__A , 1 )
score_mask |= score_mask.at[:, 0].set(__A )
# min tokens to keep
lowerCAmelCase_ :List[str] = score_mask.at[:, : self.min_tokens_to_keep].set(__A )
lowerCAmelCase_ :str = jnp.where(__A , __A , __A )
lowerCAmelCase_ :Union[str, Any] = jax.lax.sort_key_val(__A , __A )[-1]
return next_scores
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A = -float("""Inf""" ) , __A = 1 ) -> Any:
if not isinstance(__A , __A ) or top_k <= 0:
raise ValueError(f"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
lowerCAmelCase_ :Any = max(__A , __A )
lowerCAmelCase_ :List[Any] = filter_value
def __call__( self , __A , __A , __A ) -> jnp.ndarray:
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = scores.shape
lowerCAmelCase_ :List[Any] = jnp.full(batch_size * vocab_size , self.filter_value )
lowerCAmelCase_ :List[str] = min(self.top_k , scores.shape[-1] ) # Safety check
lowerCAmelCase_ , lowerCAmelCase_ :Optional[int] = lax.top_k(__A , __A )
lowerCAmelCase_ :Optional[int] = jnp.broadcast_to((jnp.arange(__A ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
lowerCAmelCase_ :Optional[int] = topk_scores.flatten()
lowerCAmelCase_ :Optional[Any] = topk_indices.flatten() + shift
lowerCAmelCase_ :Optional[int] = next_scores_flat.at[topk_indices_flat].set(__A )
lowerCAmelCase_ :Union[str, Any] = next_scores_flat.reshape(__A , __A )
return next_scores
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> Union[str, Any]:
lowerCAmelCase_ :List[str] = bos_token_id
def __call__( self , __A , __A , __A ) -> jnp.ndarray:
lowerCAmelCase_ :Dict = jnp.full(scores.shape , -float("""inf""" ) )
lowerCAmelCase_ :Any = 1 - jnp.bool_(cur_len - 1 )
lowerCAmelCase_ :Optional[int] = jnp.where(__A , new_scores.at[:, self.bos_token_id].set(0 ) , __A )
return scores
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Union[str, Any] = max_length
lowerCAmelCase_ :List[Any] = eos_token_id
def __call__( self , __A , __A , __A ) -> jnp.ndarray:
lowerCAmelCase_ :Optional[int] = jnp.full(scores.shape , -float("""inf""" ) )
lowerCAmelCase_ :int = 1 - jnp.bool_(cur_len - self.max_length + 1 )
lowerCAmelCase_ :List[str] = jnp.where(__A , new_scores.at[:, self.eos_token_id].set(0 ) , __A )
return scores
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Any:
if not isinstance(__A , __A ) or min_length < 0:
raise ValueError(f"""`min_length` has to be a positive integer, but is {min_length}""" )
if not isinstance(__A , __A ) or eos_token_id < 0:
raise ValueError(f"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
lowerCAmelCase_ :Optional[int] = min_length
lowerCAmelCase_ :Tuple = eos_token_id
def __call__( self , __A , __A , __A ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
lowerCAmelCase_ :str = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
lowerCAmelCase_ :Optional[int] = jnp.where(__A , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , __A )
return scores
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A ) -> Union[str, Any]:
lowerCAmelCase_ :Dict = list(__A )
lowerCAmelCase_ :Tuple = begin_index
def __call__( self , __A , __A , __A ) -> Dict:
lowerCAmelCase_ :int = 1 - jnp.bool_(cur_len - self.begin_index )
lowerCAmelCase_ :Optional[int] = jnp.where(__A , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , __A )
return scores
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A ) -> Any:
lowerCAmelCase_ :Optional[Any] = list(__A )
def __call__( self , __A , __A , __A ) -> jnp.ndarray:
lowerCAmelCase_ :Union[str, Any] = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) )
return scores
class _SCREAMING_SNAKE_CASE ( A__ ):
    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)
    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is past the end of force_token_array, do nothing.
            lambda: scores,
            # Otherwise, force the token when one is registered for this step.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class _SCREAMING_SNAKE_CASE ( A__ ):
def __init__( self , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = generate_config.eos_token_id
lowerCAmelCase_ :Dict = generate_config.no_timestamps_token_id
lowerCAmelCase_ :int = generate_config.no_timestamps_token_id + 1
lowerCAmelCase_ :List[str] = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(__A , """max_initial_timestamp_index""" ):
lowerCAmelCase_ :Optional[Any] = generate_config.max_initial_timestamp_index
else:
lowerCAmelCase_ :Optional[Any] = model_config.vocab_size
if self.max_initial_timestamp_index is None:
lowerCAmelCase_ :Optional[Any] = model_config.vocab_size
def __call__( self , __A , __A , __A ) -> Any:
# suppress <|notimestamps|> which is handled by without_timestamps
lowerCAmelCase_ :Union[str, Any] = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) )
def handle_pairs(__A , __A ):
lowerCAmelCase_ :Any = jnp.where((cur_len - self.begin_index) >= 1 , __A , __A )
lowerCAmelCase_ :Union[str, Any] = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , __A , )
lowerCAmelCase_ :Any = jnp.where((cur_len - self.begin_index) < 2 , __A , __A )
lowerCAmelCase_ :Optional[int] = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , __A , __A , )
return jnp.where(
__A , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , __A , )
        scores = jax.vmap(handle_pairs )(input_ids , scores )
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index , True , False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , False , )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp , scores.at[:, last_allowed + 1 :].set(-float("""inf""" ) ) , scores , )
# if sum of probability over timestamps is above any other token, sample timestamp
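        # Illustrative arithmetic: two timestamp tokens each at log-prob -1.0 give a
        # logsumexp of about -0.31, which beats a best text token at -0.5, so all
        # text-token logits are masked to -inf and a timestamp must be sampled.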
        logprobs = jax.nn.log_softmax(scores , axis=-1 )
        def handle_cumulative_probs(logprobs_k , scores_k ):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , scores_k , )

        scores = jax.vmap(handle_cumulative_probs )(logprobs , scores )
return scores
| 84 |
"""simple docstring"""
def permute(nums: list[int] ) -> list[list[int]]:
    '''
    Return all permutations of nums.

    >>> permute([1, 2, 3])
    [[3, 2, 1], [2, 3, 1], [1, 3, 2], [3, 1, 2], [2, 1, 3], [1, 2, 3]]
    '''
    result = []
    if len(nums ) == 1:
        return [nums.copy()]
    for _ in range(len(nums ) ):
        n = nums.pop(0 )
        permutations = permute(nums )
        for perm in permutations:
            perm.append(n )
        result.extend(permutations )
        nums.append(n )
    return result
def permute2(nums: list[int] ) -> list[list[int]]:
    '''
    Return all permutations of nums using in-place backtracking.

    >>> permute2([1, 2, 3])
    [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]
    '''

    def backtrack(start: int ):
        if start == len(nums ) - 1:
            output.append(nums[:] )
        else:
            for i in range(start , len(nums ) ):
                nums[start], nums[i] = nums[i], nums[start]
                backtrack(start + 1 )
                nums[start], nums[i] = nums[i], nums[start]  # backtrack

    output = []
    backtrack(0 )
    return output
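# permute copies lists at every level of the recursion, while permute2 permutes
# the input in place via swaps and copies only completed permutations.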
if __name__ == "__main__":
import doctest
    # print the permutations produced by permute2
    res = permute2([1, 2, 3])
    print(res )
doctest.testmod()
| 84 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , ) -> Any:
lowerCAmelCase_ :int = parent
lowerCAmelCase_ :Tuple = 13
lowerCAmelCase_ :Optional[Any] = 7
lowerCAmelCase_ :List[str] = True
lowerCAmelCase_ :Union[str, Any] = True
lowerCAmelCase_ :Tuple = True
lowerCAmelCase_ :int = 99
lowerCAmelCase_ :Optional[Any] = 32
lowerCAmelCase_ :Optional[int] = 2
lowerCAmelCase_ :Optional[Any] = 4
lowerCAmelCase_ :Any = 37
lowerCAmelCase_ :List[Any] = """gelu"""
lowerCAmelCase_ :Optional[Any] = 0.1
lowerCAmelCase_ :Dict = 0.1
lowerCAmelCase_ :Union[str, Any] = 512
lowerCAmelCase_ :Union[str, Any] = 16
lowerCAmelCase_ :Optional[int] = 2
lowerCAmelCase_ :str = 0.0_2
lowerCAmelCase_ :List[Any] = 3
lowerCAmelCase_ :Union[str, Any] = 4
lowerCAmelCase_ :int = None
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :Any = None
if self.use_input_mask:
lowerCAmelCase_ :Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ :Tuple = None
lowerCAmelCase_ :Tuple = None
lowerCAmelCase_ :Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ :str = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ :Optional[Any] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self ) -> List[str]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> int:
lowerCAmelCase_ :Optional[Any] = TFEsmModel(config=__A )
lowerCAmelCase_ :Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase_ :List[str] = model(__A )
lowerCAmelCase_ :Union[str, Any] = [input_ids, input_mask]
lowerCAmelCase_ :int = model(__A )
lowerCAmelCase_ :int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A , __A , __A , ) -> Any:
lowerCAmelCase_ :Optional[int] = True
lowerCAmelCase_ :Tuple = TFEsmModel(config=__A )
lowerCAmelCase_ :List[Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
lowerCAmelCase_ :Dict = model(__A )
lowerCAmelCase_ :Optional[int] = [input_ids, input_mask]
lowerCAmelCase_ :Optional[Any] = model(__A , encoder_hidden_states=__A )
# Also check the case where encoder outputs are not passed
lowerCAmelCase_ :int = model(__A , attention_mask=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> Any:
lowerCAmelCase_ :Optional[int] = TFEsmForMaskedLM(config=__A )
lowerCAmelCase_ :Tuple = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A ) -> Optional[int]:
lowerCAmelCase_ :Dict = self.num_labels
lowerCAmelCase_ :List[str] = TFEsmForTokenClassification(config=__A )
lowerCAmelCase_ :str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCAmelCase_ :Tuple = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Any = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ :int = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ :Any = False
UpperCAmelCase_ :List[str] = False
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :str = TFEsmModelTester(self )
lowerCAmelCase_ :List[Any] = ConfigTester(self , config_class=__A , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :Union[str, Any] = TFEsmModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def __lowerCAmelCase ( self ) -> str:
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self ) -> str:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Any = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowerCAmelCase_ :Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase_ :str = model(__A )[0]
lowerCAmelCase_ :str = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __A )
# compare the actual values for a slice.
lowerCAmelCase_ :int = tf.constant(
[
[
[8.9_2_1_5_1_8, -1_0.5_8_9_8_1_4, -6.4_6_7_1_3_0_7],
[-6.3_9_6_7_1_5_6, -1_3.9_1_1_3_7_7, -1.1_2_1_1_9_1_5],
[-7.7_8_1_2_4_7, -1_3.9_5_1_5_5_7, -3.7_4_0_5_9_2],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :List[str] = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowerCAmelCase_ :Dict = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCAmelCase_ :Tuple = model(__A )[0]
# compare the actual values for a slice.
lowerCAmelCase_ :int = tf.constant(
[
[
[0.1_4_4_4_3_0_9_2, 0.5_4_1_2_5_3_2_7, 0.3_2_4_7_7_3_9],
[0.3_0_3_4_0_4_8_4, 0.0_0_5_2_6_6_7_6, 0.3_1_0_7_7_7_2_2],
[0.3_2_2_7_8_0_4_3, -0.2_4_9_8_7_0_9_6, 0.3_4_1_4_6_2_8],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 84 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
UpperCAmelCase_ :Any = BioGptTokenizer
UpperCAmelCase_ :str = False
def __lowerCAmelCase ( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ :Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
lowerCAmelCase_ :str = dict(zip(__A , range(len(__A ) ) ) )
lowerCAmelCase_ :int = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
lowerCAmelCase_ :Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase_ :Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , """w""" ) as fp:
fp.write("""\n""".join(__A ) )
def __lowerCAmelCase ( self , __A ) -> Optional[int]:
lowerCAmelCase_ :List[Any] = """lower newer"""
lowerCAmelCase_ :Tuple = """lower newer"""
return input_text, output_text
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[str] = BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase_ :Union[str, Any] = """lower"""
lowerCAmelCase_ :Any = ["""low""", """er</w>"""]
lowerCAmelCase_ :Union[str, Any] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
lowerCAmelCase_ :Dict = tokens + ["""<unk>"""]
lowerCAmelCase_ :List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
lowerCAmelCase_ :List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=__A )
lowerCAmelCase_ :List[str] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__A )
lowerCAmelCase_ :Optional[int] = tokenizer.build_inputs_with_special_tokens(__A )
lowerCAmelCase_ :List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 84 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :Optional[str] = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
UpperCAmelCase_ :Optional[str] = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
UpperCAmelCase_ :int = field(
default=1024 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
UpperCAmelCase_ :bool = field(
default=A__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
UpperCAmelCase_ :bool = field(
default=A__ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
UpperCAmelCase_ :Optional[int] = field(
default=A__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
UpperCAmelCase_ :Optional[int] = field(
default=A__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
UpperCAmelCase_ :Optional[int] = field(
default=A__ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "A csv or a json file containing the training data."} )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "A csv or a json file containing the validation data."} )
UpperCAmelCase_ :Optional[str] = field(default=A__ , metadata={"help": "A csv or a json file containing the test data."} )
def __lowerCAmelCase ( self ) -> int:
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
lowerCAmelCase_ :Optional[int] = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
lowerCAmelCase_ :List[Any] = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class _SCREAMING_SNAKE_CASE :
UpperCAmelCase_ :str = field(
default=A__ , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
UpperCAmelCase_ :Optional[str] = field(
default=A__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
UpperCAmelCase_ :bool = field(
default=A__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
UpperCAmelCase_ :str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
UpperCAmelCase_ :bool = field(
default=A__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def _snake_case ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCAmelCase_ :Dict = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase_ :Optional[Any] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase_ :int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase_ :Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCAmelCase_ :Optional[Any] = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCAmelCase_ :Any = data_args.train_file.split(""".""" )[-1]
lowerCAmelCase_ :str = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCAmelCase_ :List[str] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
lowerCAmelCase_ :List[str] = load_dataset("""csv""" , data_files=lowercase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCAmelCase_ :int = load_dataset("""json""" , data_files=lowercase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCAmelCase_ :Dict = raw_datasets["""train"""].features["""label"""].names
lowerCAmelCase_ :Any = len(lowercase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowerCAmelCase_ :Any = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase__ , )
lowerCAmelCase_ :Optional[int] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase_ :Tuple = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase_ :List[str] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCAmelCase_ :Union[str, Any] = {"""Refused""": 0, """Entailed""": 1}
lowerCAmelCase_ :List[str] = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCAmelCase_ :List[Any] = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase__ : List[Any] ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase__ : Dict ):
lowerCAmelCase_ :List[str] = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
lowerCAmelCase_ :Optional[int] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
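            # Illustrative: a table_text like "header1#header2\nval1#val2" becomes a
            # one-row DataFrame with columns ["header1", "header2"].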
lowerCAmelCase_ :List[Any] = examples["""statement"""]
lowerCAmelCase_ :str = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
lowerCAmelCase_ :List[str] = tokenizer(lowercase__ , lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ )
lowerCAmelCase_ :str = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
lowerCAmelCase_ :Union[str, Any] = raw_datasets.map(
lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
lowerCAmelCase_ :Optional[Any] = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
lowerCAmelCase_ :Any = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
lowerCAmelCase_ :List[Any] = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
lowerCAmelCase_ :Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
lowerCAmelCase_ :Optional[Any] = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
lowerCAmelCase_ :Any = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase__ ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase__ : EvalPrediction ):
lowerCAmelCase_ :Optional[Any] = p.predictions[0] if isinstance(p.predictions , lowercase__ ) else p.predictions
lowerCAmelCase_ :Optional[Any] = np.argmax(lowercase__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase_ :List[str] = default_data_collator
    elif training_args.fp16:
lowerCAmelCase_ :Union[str, Any] = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 )
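        # pad_to_multiple_of=8 keeps fp16 tensors shaped for NVIDIA tensor cores.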
else:
lowerCAmelCase_ :Any = None
# Initialize our Trainer
lowerCAmelCase_ :Optional[int] = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase__ , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
lowerCAmelCase_ :Optional[int] = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase_ :List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase_ :str = last_checkpoint
lowerCAmelCase_ :Any = trainer.train(resume_from_checkpoint=lowercase__ )
lowerCAmelCase_ :List[Any] = train_result.metrics
lowerCAmelCase_ :List[str] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
lowerCAmelCase_ :Union[str, Any] = min(lowercase__ , len(lowercase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , lowercase__ )
trainer.save_metrics("""train""" , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase_ :List[Any] = trainer.evaluate(eval_dataset=lowercase__ )
lowerCAmelCase_ :List[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
lowerCAmelCase_ :Dict = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics("""eval""" , lowercase__ )
trainer.save_metrics("""eval""" , lowercase__ )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCAmelCase_ :str = predict_dataset.remove_columns("""label""" )
lowerCAmelCase_ :List[str] = trainer.predict(lowercase__ , metric_key_prefix="""predict""" ).predictions
lowerCAmelCase_ :Optional[int] = np.argmax(lowercase__ , axis=1 )
lowerCAmelCase_ :Optional[int] = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(lowercase__ , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(lowercase__ ):
lowerCAmelCase_ :Optional[int] = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
lowerCAmelCase_ :Any = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def _mp_fn(index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 84 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "bert-generation"
def __init__( self , __A=5_0358 , __A=1024 , __A=24 , __A=16 , __A=4096 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=0.0_2 , __A=1E-12 , __A=0 , __A=2 , __A=1 , __A="absolute" , __A=True , **__A , ) -> Tuple:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
lowerCAmelCase_ :Any = vocab_size
lowerCAmelCase_ :List[Any] = hidden_size
lowerCAmelCase_ :Optional[int] = num_hidden_layers
lowerCAmelCase_ :int = num_attention_heads
lowerCAmelCase_ :List[Any] = hidden_act
lowerCAmelCase_ :Optional[Any] = intermediate_size
lowerCAmelCase_ :List[Any] = hidden_dropout_prob
lowerCAmelCase_ :int = attention_probs_dropout_prob
lowerCAmelCase_ :Tuple = max_position_embeddings
lowerCAmelCase_ :List[str] = initializer_range
lowerCAmelCase_ :Union[str, Any] = layer_norm_eps
lowerCAmelCase_ :List[str] = position_embedding_type
lowerCAmelCase_ :Optional[int] = use_cache
| 84 | 1 |
"""simple docstring"""
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase = logging.getLogger()
def _snake_case ( ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Any = argparse.ArgumentParser()
parser.add_argument("""-f""" )
lowerCAmelCase_ :int = parser.parse_args()
return args.f
def _snake_case ( lowercase__ : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = {}
lowerCAmelCase_ :List[Any] = os.path.join(lowercase__ , """all_results.json""" )
if os.path.exists(lowercase__ ):
with open(lowercase__ , """r""" ) as f:
lowerCAmelCase_ :Union[str, Any] = json.load(lowercase__ )
else:
raise ValueError(f"""can't find {path}""" )
return results
def _snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
__UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _SCREAMING_SNAKE_CASE ( A__ ):
@classmethod
def __lowerCAmelCase ( cls ) -> str:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
lowerCAmelCase_ :Tuple = tempfile.mkdtemp()
lowerCAmelCase_ :Dict = os.path.join(cls.tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
lowerCAmelCase_ :List[Any] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def __lowerCAmelCase ( cls ) -> List[str]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Optional[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :List[Any] = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
lowerCAmelCase_ :Tuple = get_results(__A )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__A , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :int = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :Tuple = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowerCAmelCase_ :List[str] = get_results(__A )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__A , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Optional[int] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :Optional[int] = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ :List[str] = get_results(__A )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__A , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
lowerCAmelCase_ :Union[str, Any] = 7 if get_gpu_count() > 1 else 2
lowerCAmelCase_ :Dict = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :str = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ :Tuple = get_results(__A )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.7_5 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__A , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :Union[str, Any] = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ :int = get_results(__A )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__A , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Optional[int] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :Optional[int] = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ :Tuple = get_results(__A )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__A , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCAmelCase ( self ) -> Dict:
lowerCAmelCase_ :Optional[int] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :Union[str, Any] = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ :Optional[Any] = get_results(__A )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__A , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :Optional[Any] = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ :List[str] = get_results(__A )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(__A , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__A , """translation_no_trainer""" ) ) )
@slow
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Tuple = logging.StreamHandler(sys.stdout )
logger.addHandler(__A )
lowerCAmelCase_ :Optional[int] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :Optional[int] = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
lowerCAmelCase_ :int = get_results(__A )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.1_0 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :List[Any] = self.get_auto_remove_tmp_dir()
lowerCAmelCase_ :Tuple = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
lowerCAmelCase_ :Dict = get_results(__A )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__A , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__A , """image_classification_no_trainer""" ) ) )
| 84 |
"""simple docstring"""
def bfs(graph: list[list[int]] , source: int , sink: int , parent: list[int] ) -> bool:
    '''Breadth-first search over the residual graph; records the augmenting path in parent.'''
    visited = [False] * len(graph )
    queue = []
    queue.append(source )
    visited[source] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph: list[list[int]] , source: int , sink: int ) -> int:
    '''Edmonds-Karp variant: repeatedly augment along the shortest path found by BFS.'''
    parent = [-1] * len(graph )
    max_flow = 0
    while bfs(graph , source , sink , parent ):
        path_flow = float("""Inf""" )
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
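# For this classic CLRS example network, the maximum flow printed is 23.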
| 84 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__UpperCAmelCase = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Dict = ["pixel_values"]
def __init__( self , __A = True , __A = None , __A = None , __A = PILImageResampling.BILINEAR , __A = True , __A = 1 / 255 , __A = True , __A = None , __A = None , **__A , ) -> None:
super().__init__(**__A )
lowerCAmelCase_ :Any = size if size is not None else {"""shortest_edge""": 384}
lowerCAmelCase_ :List[Any] = get_size_dict(__A , default_to_square=__A )
lowerCAmelCase_ :List[str] = do_resize
lowerCAmelCase_ :int = size
# Default value set here for backwards compatibility where the value in config is None
lowerCAmelCase_ :Dict = crop_pct if crop_pct is not None else 224 / 256
lowerCAmelCase_ :Tuple = resample
lowerCAmelCase_ :List[Any] = do_rescale
lowerCAmelCase_ :Optional[Any] = rescale_factor
lowerCAmelCase_ :int = do_normalize
lowerCAmelCase_ :Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase_ :Dict = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self , __A , __A , __A , __A = PILImageResampling.BICUBIC , __A = None , **__A , ) -> np.ndarray:
lowerCAmelCase_ :List[Any] = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(f"""Size dictionary must contain 'shortest_edge' key. Got {size.keys()}""" )
lowerCAmelCase_ :Dict = size["""shortest_edge"""]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
lowerCAmelCase_ :List[Any] = int(shortest_edge / crop_pct )
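            # e.g. a requested shortest_edge of 224 with the default crop_pct of
            # 224 / 256 = 0.875 resizes the short side to int(224 / 0.875) = 256,
            # then center-crops back to (224, 224).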
lowerCAmelCase_ :int = get_resize_output_image_size(__A , size=__A , default_to_square=__A )
lowerCAmelCase_ :Tuple = resize(image=__A , size=__A , resample=__A , data_format=__A , **__A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=__A , size=(shortest_edge, shortest_edge) , data_format=__A , **__A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
__A , size=(shortest_edge, shortest_edge) , resample=__A , data_format=__A , **__A )
def __lowerCAmelCase ( self , __A , __A , __A = None , **__A , ) -> Optional[Any]:
return rescale(__A , scale=__A , data_format=__A , **__A )
def __lowerCAmelCase ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def __lowerCAmelCase ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> PIL.Image.Image:
lowerCAmelCase_ :Any = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase_ :Dict = crop_pct if crop_pct is not None else self.crop_pct
lowerCAmelCase_ :Optional[int] = resample if resample is not None else self.resample
lowerCAmelCase_ :Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase_ :int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase_ :int = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase_ :Tuple = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase_ :str = image_std if image_std is not None else self.image_std
lowerCAmelCase_ :str = size if size is not None else self.size
lowerCAmelCase_ :Any = get_size_dict(__A , default_to_square=__A )
lowerCAmelCase_ :int = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("""crop_pct must be specified if size < 384.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowerCAmelCase_ :str = [to_numpy_array(__A ) for image in images]
if do_resize:
lowerCAmelCase_ :List[Any] = [self.resize(image=__A , size=__A , crop_pct=__A , resample=__A ) for image in images]
if do_rescale:
lowerCAmelCase_ :Any = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
lowerCAmelCase_ :str = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
lowerCAmelCase_ :Any = [to_channel_dimension_format(__A , __A ) for image in images]
lowerCAmelCase_ :Optional[Any] = {"""pixel_values""": images}
return BatchFeature(data=__A , tensor_type=__A )
| 84 |
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = 1_0
lowerCAmelCase_ :Optional[int] = datasets.Features(
{
"""tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
"""labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
"""answers""": datasets.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
"""id""": datasets.Value("""int64""" ),
} )
lowerCAmelCase_ :int = datasets.Dataset.from_dict(
{
"""tokens""": [["""foo"""] * 5] * n,
"""labels""": [[1] * 5] * n,
"""answers""": [{"""answer_start""": [9_7], """text""": ["""1976"""]}] * 1_0,
"""id""": list(range(lowercase__ ) ),
} , features=lowercase__ , )
return dataset
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple , lowercase__ : int ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return filename
# FILE_CONTENT + files
__UpperCAmelCase = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
lowerCAmelCase_ :List[Any] = FILE_CONTENT
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[Any] ) -> Tuple:
'''simple docstring'''
    import bz2
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" )
    with bz2.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] ) -> Dict:
'''simple docstring'''
import gzip
lowerCAmelCase_ :int = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
lowerCAmelCase_ :Tuple = bytes(lowercase__ , """utf-8""" )
with gzip.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
lowerCAmelCase_ :List[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
lowerCAmelCase_ :int = bytes(lowercase__ , """utf-8""" )
        with lz4.frame.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict , lowercase__ : Optional[int] ) -> Any:
'''simple docstring'''
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with py7zr.SevenZipFile(lowercase__ , """w""" ) as archive:
archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
import tarfile
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> str:
'''simple docstring'''
import lzma
lowerCAmelCase_ :Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
lowerCAmelCase_ :Optional[Any] = bytes(lowercase__ , """utf-8""" )
with lzma.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import zipfile
lowerCAmelCase_ :Dict = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Tuple:
'''simple docstring'''
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCAmelCase_ :Union[str, Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
lowerCAmelCase_ :Any = bytes(lowercase__ , """utf-8""" )
with zstd.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
lowerCAmelCase_ :Any = textwrap.dedent(
"""\
<?xml version=\"1.0\" encoding=\"UTF-8\" ?>
<tmx version=\"1.4\">
<header segtype=\"sentence\" srclang=\"ca\" />
<body>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
<tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>""" )
with open(lowercase__ , """w""" ) as f:
f.write(lowercase__ )
return filename
__UpperCAmelCase = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
__UpperCAmelCase = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
__UpperCAmelCase = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
DATA_STR = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> Union[str, Any]:
'''simple docstring'''
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Tuple = datasets.Dataset.from_dict(lowercase__ )
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.sqlite""" )
with contextlib.closing(sqlite3.connect(lowercase__ ) ) as con:
lowerCAmelCase_ :Union[str, Any] = con.cursor()
cur.execute("""CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)""" )
for item in DATA:
cur.execute("""INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)""" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> int:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Optional[int] = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.csv""" )
with open(lowercase__ , """w""" , newline="""""" ) as f:
lowerCAmelCase_ :Dict = csv.DictWriter(lowercase__ , fieldnames=["""col_1""", """col_2""", """col_3"""] )
writer.writeheader()
for item in DATA:
writer.writerow(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str , lowercase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
import bz2
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.bz2"""
with open(lowercase__ , """rb""" ) as f:
lowerCAmelCase_ :Union[str, Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[str] , lowercase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(csv_path.replace(""".csv""" , """.CSV""" ) ) )
f.write(lowercase__ , arcname=os.path.basename(csva_path.replace(""".csv""" , """.CSV""" ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.parquet""" )
lowerCAmelCase_ :Optional[Any] = pa.schema(
{
"""col_1""": pa.string(),
"""col_2""": pa.intaa(),
"""col_3""": pa.floataa(),
} )
with open(lowercase__ , """wb""" ) as f:
lowerCAmelCase_ :Optional[int] = pq.ParquetWriter(lowercase__ , schema=lowercase__ )
lowerCAmelCase_ :List[str] = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ )
writer.write_table(lowercase__ )
writer.close()
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Dict = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Union[str, Any] = {"""data""": DATA}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : str ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.json""" )
lowerCAmelCase_ :Optional[Any] = {"""data""": DATA_DICT_OF_LISTS}
with open(lowercase__ , """w""" ) as f:
json.dump(lowercase__ , lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_312.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_312:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset-str.jsonl""" )
with open(lowercase__ , """w""" ) as f:
for item in DATA_STR:
f.write(json.dumps(lowercase__ ) + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : int , lowercase__ : Dict ) -> Optional[int]:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> Any:
'''simple docstring'''
import gzip
lowerCAmelCase_ :Optional[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.gz""" )
with open(lowercase__ , """rb""" ) as orig_file:
with gzip.open(lowercase__ , """wb""" ) as zipped_file:
zipped_file.writelines(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : List[str] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : str , lowercase__ : List[str] ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Dict , lowercase__ : str , lowercase__ : List[str] , lowercase__ : int ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data""" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(lowercase__ , """w""" ) as f:
f.add(lowercase__ , arcname=os.path.join("""nested""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :str = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :List[Any] = str(tmp_path_factory.mktemp("""data""" ) / """dataset.txt""" )
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :List[str] = str(tmp_path_factory.mktemp("""data""" ) / """dataset2.txt""" )
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Dict = ["""0""", """1""", """2""", """3"""]
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.abc"""
with open(lowercase__ , """w""" ) as f:
for item in data:
f.write(item + """\n""" )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : List[str] , lowercase__ : str , lowercase__ : int ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Any = tmp_path_factory.mktemp("""data""" ) / """dataset.text.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : List[str] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :str = tmp_path_factory.mktemp("""data""" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
f.write(lowercase__ , arcname=os.path.join("""main_dir""" , os.path.basename(lowercase__ ) ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : Tuple ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.ext.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename("""unsupported.ext""" ) )
f.write(lowercase__ , arcname=os.path.basename("""unsupported_2.ext""" ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = """\n""".join(["""First""", """Second\u2029with Unicode new line""", """Third"""] )
lowerCAmelCase_ :str = str(tmp_path_factory.mktemp("""data""" ) / """dataset_with_unicode_new_lines.txt""" )
with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> int:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_image_rgb.jpg""" )
@pytest.fixture(scope="""session""" )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
return os.path.join("""tests""" , """features""" , """data""" , """test_audio_44100.wav""" )
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Any , lowercase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = tmp_path_factory.mktemp("""data""" ) / """dataset.img.zip"""
with zipfile.ZipFile(lowercase__ , """w""" ) as f:
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace(""".jpg""" , """2.jpg""" ) )
return path
@pytest.fixture(scope="""session""" )
def _snake_case ( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :int = tmp_path_factory.mktemp("""data_dir""" )
(data_dir / "subdir").mkdir()
with open(data_dir / """subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden file
with open(data_dir / """subdir""" / """.test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / """.subdir""" / """train.txt""" , """w""" ) as f:
f.write("""foo\n""" * 1_0 )
with open(data_dir / """.subdir""" / """test.txt""" , """w""" ) as f:
f.write("""bar\n""" * 1_0 )
return data_dir
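# Editorial usage sketch (not part of the original conftest): a test consuming
# the CSV fixture defined above. Fixture names are assumptions -- every
# obfuscated fixture here is called `_snake_case`, so `csv_path` is inferred
# from the filename it creates.
def test_csv_fixture_loads(csv_path):
    ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
    assert ds.num_rows == len(DATA)  # four rows written by the fixture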
| 84 | 1 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class _SCREAMING_SNAKE_CASE ( A__ ):
pass
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A ) -> None:
lowerCAmelCase_ :Any = data
lowerCAmelCase_ :Node | None = None
def __iter__( self ) -> Dict:
lowerCAmelCase_ :int = self
lowerCAmelCase_ :Tuple = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(__A )
yield node.data
lowerCAmelCase_ :Tuple = node.next_node
@property
def __lowerCAmelCase ( self ) -> bool:
try:
list(self )
return False
except ContainsLoopError:
return True
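# Editorial addition: the has_loop property above keeps a `visited` list, which
# costs linear extra space and quadratic time (list membership is O(n)).
# Floyd's tortoise-and-hare detection is a constant-space alternative; a
# minimal sketch against the same node class (obfuscated to
# _SCREAMING_SNAKE_CASE above, annotated as `Node`):
def has_loop_floyd(head: Node) -> bool:
    slow = fast = head
    while fast is not None and fast.next_node is not None:
        slow = slow.next_node  # advances one step
        fast = fast.next_node.next_node  # advances two steps
        if slow is fast:  # the pointers can only meet inside a cycle
            return True
    return False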
if __name__ == "__main__":
root_node = Node(1)
root_node.next_node = Node(2)
root_node.next_node.next_node = Node(3)
root_node.next_node.next_node.next_node = Node(4)
print(root_node.has_loop) # False
root_node.next_node.next_node.next_node = root_node.next_node
print(root_node.has_loop) # True
root_node = Node(5)
root_node.next_node = Node(6)
root_node.next_node.next_node = Node(5)
root_node.next_node.next_node.next_node = Node(6)
print(root_node.has_loop) # False
root_node = Node(1)
print(root_node.has_loop) # False
| 84 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Optional[Any] = "data2vec-text"
def __init__( self , __A=3_0522 , __A=768 , __A=12 , __A=12 , __A=3072 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=2 , __A=0.0_2 , __A=1E-12 , __A=1 , __A=0 , __A=2 , __A="absolute" , __A=True , __A=None , **__A , ) -> Tuple:
super().__init__(pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , **__A )
lowerCAmelCase_ :Dict = vocab_size
lowerCAmelCase_ :Dict = hidden_size
lowerCAmelCase_ :int = num_hidden_layers
lowerCAmelCase_ :List[Any] = num_attention_heads
lowerCAmelCase_ :Any = hidden_act
lowerCAmelCase_ :Optional[int] = intermediate_size
lowerCAmelCase_ :str = hidden_dropout_prob
lowerCAmelCase_ :Any = attention_probs_dropout_prob
lowerCAmelCase_ :str = max_position_embeddings
lowerCAmelCase_ :int = type_vocab_size
lowerCAmelCase_ :Tuple = initializer_range
lowerCAmelCase_ :List[Any] = layer_norm_eps
lowerCAmelCase_ :List[Any] = position_embedding_type
lowerCAmelCase_ :List[Any] = use_cache
lowerCAmelCase_ :List[Any] = classifier_dropout
class _SCREAMING_SNAKE_CASE ( A__ ):
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase_ :List[Any] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase_ :List[str] = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
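# Editorial usage sketch; the class names are assumptions, since both classes
# above are obfuscated to _SCREAMING_SNAKE_CASE (in transformers they are
# Data2VecTextConfig and Data2VecTextOnnxConfig):
# config = Data2VecTextConfig()
# onnx_config = Data2VecTextOnnxConfig(config, task="default")
# print(onnx_config.inputs)  # dynamic axes for input_ids and attention_mask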
| 84 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def _snake_case ( ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :str = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
lowerCAmelCase_ :Tuple = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(lowercase__ )
# Let's go
lowerCAmelCase_ :Tuple = parser.parse_args()
if not hasattr(lowercase__ , """func""" ):
parser.print_help()
exit(1 )
# Run
lowerCAmelCase_ :List[str] = args.func(lowercase__ )
service.run()
if __name__ == "__main__":
main()
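# Example invocation (editorial): EnvironmentCommand registers the `env`
# subcommand, so the CLI entry point above is exercised with:
#   $ diffusers-cli env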
| 84 |
"""simple docstring"""
import argparse
import collections
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import T5Config, T5EncoderModel, T5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( lowercase__ : Dict , lowercase__ : Dict , lowercase__ : str , lowercase__ : Tuple="attention" ) -> str:
'''simple docstring'''
lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/{layer_name}/key/kernel"""]
lowerCAmelCase_ :Union[str, Any] = params[f"""{prefix}/layers_{i}/{layer_name}/out/kernel"""]
lowerCAmelCase_ :Any = params[f"""{prefix}/layers_{i}/{layer_name}/query/kernel"""]
lowerCAmelCase_ :Optional[int] = params[f"""{prefix}/layers_{i}/{layer_name}/value/kernel"""]
return k, o, q, v
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , lowercase__ : Any=False ) -> int:
'''simple docstring'''
if split_mlp_wi:
lowerCAmelCase_ :Tuple = params[f"""{prefix}/layers_{i}/mlp/wi_0/kernel"""]
lowerCAmelCase_ :List[str] = params[f"""{prefix}/layers_{i}/mlp/wi_1/kernel"""]
lowerCAmelCase_ :Tuple = (wi_a, wi_a)
else:
lowerCAmelCase_ :List[Any] = params[f"""{prefix}/layers_{i}/mlp/wi/kernel"""]
lowerCAmelCase_ :Dict = params[f"""{prefix}/layers_{i}/mlp/wo/kernel"""]
return wi, wo
def _snake_case ( lowercase__ : Any , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] ) -> Tuple:
'''simple docstring'''
return params[f"""{prefix}/layers_{i}/{layer_name}/scale"""]
def _snake_case ( lowercase__ : dict , *, lowercase__ : int , lowercase__ : bool ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = traverse_util.flatten_dict(variables["""target"""] )
lowerCAmelCase_ :Tuple = {"""/""".join(lowercase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
lowerCAmelCase_ :Any = """encoder/layers_0/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowercase__ )
lowerCAmelCase_ :List[Any] = collections.OrderedDict()
# Shared embeddings.
lowerCAmelCase_ :Optional[int] = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :str = tax_attention_lookup(lowercase__ , lowercase__ , """encoder""" , """attention""" )
lowerCAmelCase_ :Optional[Any] = layer_norm
lowerCAmelCase_ :Any = k.T
lowerCAmelCase_ :Tuple = o.T
lowerCAmelCase_ :Tuple = q.T
lowerCAmelCase_ :str = v.T
# Block i, layer 1 (MLP).
lowerCAmelCase_ :Dict = tax_layer_norm_lookup(lowercase__ , lowercase__ , """encoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ :Any = tax_mlp_lookup(lowercase__ , lowercase__ , """encoder""" , lowercase__ )
lowerCAmelCase_ :Union[str, Any] = layer_norm
if split_mlp_wi:
lowerCAmelCase_ :List[Any] = wi[0].T
lowerCAmelCase_ :Dict = wi[1].T
else:
lowerCAmelCase_ :int = wi.T
lowerCAmelCase_ :List[str] = wo.T
lowerCAmelCase_ :Tuple = old[
"""encoder/relpos_bias/rel_embedding"""
].T
lowerCAmelCase_ :List[str] = old["""encoder/encoder_norm/scale"""]
if not is_encoder_only:
# Decoder.
for i in range(lowercase__ ):
# Block i, layer 0 (Self Attention).
lowerCAmelCase_ :Optional[Any] = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_self_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """self_attention""" )
lowerCAmelCase_ :List[Any] = layer_norm
lowerCAmelCase_ :List[str] = k.T
lowerCAmelCase_ :Any = o.T
lowerCAmelCase_ :Any = q.T
lowerCAmelCase_ :Dict = v.T
# Block i, layer 1 (Cross Attention).
lowerCAmelCase_ :int = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Tuple = tax_attention_lookup(lowercase__ , lowercase__ , """decoder""" , """encoder_decoder_attention""" )
lowerCAmelCase_ :Optional[int] = layer_norm
lowerCAmelCase_ :str = k.T
lowerCAmelCase_ :Tuple = o.T
lowerCAmelCase_ :Any = q.T
lowerCAmelCase_ :int = v.T
# Block i, layer 2 (MLP).
lowerCAmelCase_ :Any = tax_layer_norm_lookup(lowercase__ , lowercase__ , """decoder""" , """pre_mlp_layer_norm""" )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = tax_mlp_lookup(lowercase__ , lowercase__ , """decoder""" , lowercase__ )
lowerCAmelCase_ :List[Any] = layer_norm
if split_mlp_wi:
lowerCAmelCase_ :Any = wi[0].T
lowerCAmelCase_ :Any = wi[1].T
else:
lowerCAmelCase_ :Tuple = wi.T
lowerCAmelCase_ :List[str] = wo.T
lowerCAmelCase_ :Optional[Any] = old["""decoder/decoder_norm/scale"""]
lowerCAmelCase_ :Optional[Any] = old[
"""decoder/relpos_bias/rel_embedding"""
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
lowerCAmelCase_ :Tuple = old["""decoder/logits_dense/kernel"""].T
return new
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : bool ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :Optional[int] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ :Optional[int] = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
lowerCAmelCase_ :Tuple = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
lowerCAmelCase_ :Any = state_dict["""shared.weight"""]
return state_dict
def _snake_case ( lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = checkpoints.load_t5x_checkpoint(lowercase__ )
lowerCAmelCase_ :Optional[int] = convert_tax_to_pytorch(lowercase__ , num_layers=config.num_layers , is_encoder_only=lowercase__ )
lowerCAmelCase_ :Union[str, Any] = make_state_dict(lowercase__ , lowercase__ )
model.load_state_dict(lowercase__ , strict=lowercase__ )
def _snake_case ( lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str , lowercase__ : bool = False ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Any = T5Config.from_json_file(lowercase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
lowerCAmelCase_ :List[Any] = T5EncoderModel(lowercase__ )
else:
lowerCAmelCase_ :List[str] = T5ForConditionalGeneration(lowercase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowercase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase__ )
print("""Done""" )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
__UpperCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
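# Example invocation (editorial; the script filename is a placeholder, the
# flags come from the argparse definition above):
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path ./t5-pytorch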
| 84 | 1 |
"""simple docstring"""
from __future__ import annotations
seive = [True] * 1_00_00_01
i = 2
while i * i <= 1_00_00_00:
if seive[i]:
for j in range(i * i, 1_00_00_01, i):
seive[j] = False
i += 1
def _snake_case ( lowercase__ : int ) -> bool:
'''simple docstring'''
return seive[n]
def _snake_case ( lowercase__ : int ) -> bool:
'''simple docstring'''
return any(digit in """02468""" for digit in str(lowercase__ ) )
def _snake_case ( lowercase__ : int = 1_0_0_0_0_0_0 ) -> list[int]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = [2] # result already includes the number 2.
for num in range(3 , limit + 1 , 2 ):
if is_prime(lowercase__ ) and not contains_an_even_digit(lowercase__ ):
lowerCAmelCase_ :str = str(lowercase__ )
lowerCAmelCase_ :str = [int(str_num[j:] + str_num[:j] ) for j in range(len(lowercase__ ) )]
if all(is_prime(lowercase__ ) for i in list_nums ):
result.append(lowercase__ )
return result
def _snake_case ( ) -> int:
'''simple docstring'''
return len(find_circular_primes() )
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""")
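# Editorial sanity check: with the default limit of 1_000_000 this is Project
# Euler problem 35, whose known answer is 55 circular primes.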
| 84 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( lowercase__ : Optional[Any] ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
lowerCAmelCase_ :Union[str, Any] = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
lowerCAmelCase_ :Any = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase_ :List[str] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase_ :Tuple = key.replace(f"""patch_embed{idx}""" , f"""patch_embeddings.{int(lowercase__ )-1}""" )
if "norm" in key:
lowerCAmelCase_ :Dict = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase_ :str = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
lowerCAmelCase_ :str = key.replace(f"""layer_norm{idx}""" , f"""layer_norm.{int(lowercase__ )-1}""" )
if "layer_norm1" in key:
lowerCAmelCase_ :Optional[Any] = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase_ :str = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase_ :List[str] = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase_ :int = key.replace(f"""block{idx}""" , f"""block.{int(lowercase__ )-1}""" )
if "attn.q" in key:
lowerCAmelCase_ :Tuple = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase_ :Optional[int] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase_ :str = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase_ :List[Any] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase_ :Optional[Any] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase_ :List[str] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase_ :str = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase_ :Any = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase_ :str = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase_ :Optional[int] = key.replace(f"""linear_c{idx}""" , f"""linear_c.{int(lowercase__ )-1}""" )
if "bot_conv" in key:
lowerCAmelCase_ :Union[str, Any] = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
lowerCAmelCase_ :int = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
lowerCAmelCase_ :str = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
lowerCAmelCase_ :Any = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
lowerCAmelCase_ :List[str] = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
lowerCAmelCase_ :Dict = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
lowerCAmelCase_ :Any = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
lowerCAmelCase_ :Tuple = key.replace("""module.last_layer_depth""" , """head.head""" )
lowerCAmelCase_ :List[Any] = value
return new_state_dict
def _snake_case ( lowercase__ : str , lowercase__ : int ) -> str:
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.weight""" )
lowerCAmelCase_ :Tuple = state_dict.pop(f"""glpn.encoder.block.{i}.{j}.attention.self.kv.bias""" )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase_ :Optional[Any] = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase_ :Union[str, Any] = kv_bias[: config.hidden_sizes[i]]
lowerCAmelCase_ :List[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase_ :int = kv_bias[config.hidden_sizes[i] :]
def _snake_case ( ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :int = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase_ :Optional[Any] = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return image
@torch.no_grad()
def _snake_case ( lowercase__ : List[Any] , lowercase__ : str , lowercase__ : Dict=False , lowercase__ : List[Any]=None ) -> int:
'''simple docstring'''
lowerCAmelCase_ :int = GLPNConfig(hidden_sizes=[6_4, 1_2_8, 3_2_0, 5_1_2] , decoder_hidden_size=6_4 , depths=[3, 8, 2_7, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase_ :Union[str, Any] = GLPNImageProcessor()
# prepare image
lowerCAmelCase_ :List[Any] = prepare_img()
lowerCAmelCase_ :int = image_processor(images=lowercase__ , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase_ :Tuple = torch.load(lowercase__ , map_location=torch.device("""cpu""" ) )
# rename keys
lowerCAmelCase_ :Union[str, Any] = rename_keys(lowercase__ )
# key and value matrices need special treatment
read_in_k_v(lowercase__ , lowercase__ )
# create HuggingFace model and load state dict
lowerCAmelCase_ :List[Any] = GLPNForDepthEstimation(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
# forward pass
lowerCAmelCase_ :Dict = model(lowercase__ )
lowerCAmelCase_ :Tuple = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase_ :Optional[Any] = torch.tensor(
[[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] )
elif "kitti" in model_name:
lowerCAmelCase_ :Any = torch.tensor(
[[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] )
else:
raise ValueError(f"""Unknown model name: {model_name}""" )
lowerCAmelCase_ :Union[str, Any] = torch.Size([1, 4_8_0, 6_4_0] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , lowercase__ , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=lowercase__ , )
image_processor.push_to_hub(
repo_path_or_name=Path(lowercase__ , lowercase__ ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=lowercase__ , )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
__UpperCAmelCase = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 84 | 1 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _snake_case ( lowercase__ : Tuple ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ :List[str] = os.path.join(args.tf_model_dir , """parameters.json""" )
lowerCAmelCase_ :Tuple = json.loads(open(lowercase__ ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
lowerCAmelCase_ :List[Any] = args.output + """.pt"""
lowerCAmelCase_ :Optional[int] = OrderedDict()
with tf.device("""/CPU:0""" ):
lowerCAmelCase_ :Tuple = tf.train.load_checkpoint(args.tf_model_dir )
lowerCAmelCase_ :List[Any] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCAmelCase_ :List[str] = reader.get_tensor(lowercase__ ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
lowerCAmelCase_ :Dict = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
lowerCAmelCase_ :Any = 8
lowerCAmelCase_ :Tuple = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
lowerCAmelCase_ :List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :List[Any] = torch.tensor(lowercase__ )
elif key_name.startswith("""model/moe""" ):
lowerCAmelCase_ :Optional[Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
lowerCAmelCase_ :int = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
lowerCAmelCase_ :List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :Union[str, Any] = torch.tensor(lowercase__ )
elif key_name.endswith("""/softmlp/kernel""" ):
lowerCAmelCase_ :Tuple = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
lowerCAmelCase_ :List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :Any = torch.tensor(lowercase__ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
lowerCAmelCase_ :Tuple = key_name[-9:-7]
for i in range(1_6 ):
lowerCAmelCase_ :Dict = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
lowerCAmelCase_ :Optional[Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCAmelCase_ :List[str] = torch.tensor(lowercase__ )
elif key_name.startswith("""model/mlp""" ):
lowerCAmelCase_ :Optional[Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
lowerCAmelCase_ :List[str] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
lowerCAmelCase_ :Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :Dict = torch.tensor(lowercase__ )
elif key_name.endswith("""/p1/bias""" ):
lowerCAmelCase_ :Tuple = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
lowerCAmelCase_ :List[str] = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ :Any = torch.tensor(lowercase__ )
elif key_name.endswith("""/p2/kernel""" ):
lowerCAmelCase_ :List[Any] = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
lowerCAmelCase_ :Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :List[Any] = torch.tensor(lowercase__ )
elif key_name.endswith("""/p2/bias""" ):
lowerCAmelCase_ :Any = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
lowerCAmelCase_ :List[Any] = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ :Tuple = torch.tensor(lowercase__ )
elif key_name.startswith("""model/ln""" ):
lowerCAmelCase_ :str = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowerCAmelCase_ :Dict = """model.blocks.%d.feed_forward.norm.bias""" % player
lowerCAmelCase_ :List[Any] = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ :List[Any] = torch.tensor(lowercase__ )
elif key_name.endswith("""/g""" ):
lowerCAmelCase_ :Tuple = """model.blocks.%d.feed_forward.norm.weight""" % player
lowerCAmelCase_ :Tuple = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ :List[str] = torch.tensor(lowercase__ )
elif key_name.startswith("""model/att""" ):
lowerCAmelCase_ :Optional[Any] = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
lowerCAmelCase_ :Any = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCAmelCase_ :str = state[:, 0, :, :]
lowerCAmelCase_ :int = state[:, 1, :, :]
lowerCAmelCase_ :Dict = state[:, 2, :, :]
lowerCAmelCase_ :Any = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :Dict = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :Dict = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :Optional[Any] = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
lowerCAmelCase_ :List[str] = torch.tensor(lowercase__ )
lowerCAmelCase_ :List[str] = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
lowerCAmelCase_ :Union[str, Any] = torch.tensor(lowercase__ )
lowerCAmelCase_ :str = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
lowerCAmelCase_ :Optional[int] = torch.tensor(lowercase__ )
elif key_name.endswith("""/o/kernel""" ):
lowerCAmelCase_ :List[Any] = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
lowerCAmelCase_ :int = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :Optional[int] = torch.tensor(lowercase__ )
elif key_name.startswith("""model/an""" ):
lowerCAmelCase_ :Any = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
lowerCAmelCase_ :Any = """model.blocks.%d.self_attn.norm.bias""" % player
lowerCAmelCase_ :Optional[int] = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ :List[Any] = torch.tensor(lowercase__ )
elif key_name.endswith("""/g""" ):
lowerCAmelCase_ :Dict = """model.blocks.%d.self_attn.norm.weight""" % player
lowerCAmelCase_ :Dict = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ :Tuple = torch.tensor(lowercase__ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
lowerCAmelCase_ :Optional[Any] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
lowerCAmelCase_ :str = """model.%s.weight""" % nlayer
lowerCAmelCase_ :Union[str, Any] = vnp.copy() # same in embedded
lowerCAmelCase_ :Optional[Any] = torch.tensor(lowercase__ )
if key_name.startswith("""model/wte""" ):
lowerCAmelCase_ :Optional[Any] = """lm_head.weight"""
lowerCAmelCase_ :int = vnp.copy() # same in embedded
lowerCAmelCase_ :int = torch.tensor(lowercase__ )
elif key_name.startswith("""model/wob""" ):
lowerCAmelCase_ :Union[str, Any] = """final_logits_bias"""
lowerCAmelCase_ :Tuple = vnp.copy() # same in embedded
lowerCAmelCase_ :int = state.reshape((1, -1) )
lowerCAmelCase_ :Optional[int] = torch.tensor(lowercase__ )
elif key_name == "model/dense/kernel":
lowerCAmelCase_ :Optional[int] = """model.last_project.weight"""
lowerCAmelCase_ :Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ :List[str] = torch.tensor(lowercase__ )
elif key_name == "model/dense_1/bias":
lowerCAmelCase_ :Tuple = """model.last_project.bias"""
lowerCAmelCase_ :List[str] = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ :List[Any] = torch.tensor(lowercase__ )
torch.save(lowercase__ , args.output )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
__UpperCAmelCase = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 84 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_roc_bert'] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
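# Editorial note: this is the standard transformers lazy-import pattern. At
# import time only _import_structure (a mapping of submodule name to exported
# symbols) is built; _LazyModule installs itself in sys.modules and defers the
# heavy submodule imports until an attribute is first accessed.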
| 84 | 1 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Union[str, Any] = (UnCLIPScheduler,)
def __lowerCAmelCase ( self , **__A ) -> int:
lowerCAmelCase_ :Dict = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**__A )
return config
def __lowerCAmelCase ( self ) -> Dict:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __lowerCAmelCase ( self ) -> Any:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__A )
def __lowerCAmelCase ( self ) -> str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def __lowerCAmelCase ( self ) -> Dict:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__A )
def __lowerCAmelCase ( self ) -> str:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__A )
def __lowerCAmelCase ( self ) -> Tuple:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__A , prev_timestep=__A )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :int = self.scheduler_classes[0]
lowerCAmelCase_ :Optional[Any] = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowerCAmelCase_ :Any = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :int = self.scheduler_classes[0]
lowerCAmelCase_ :Union[str, Any] = self.get_scheduler_config(variance_type="""learned_range""" )
lowerCAmelCase_ :Optional[Any] = scheduler_class(**__A )
lowerCAmelCase_ :Tuple = 0.5
assert scheduler._get_variance(1 , predicted_variance=__A ) - -1_0.1_7_1_2_7_9_0 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=__A ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=__A ) - -0.0_0_1_0_0_1_1 < 1E-5
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :List[Any] = self.scheduler_classes[0]
lowerCAmelCase_ :Any = self.get_scheduler_config()
lowerCAmelCase_ :str = scheduler_class(**__A )
lowerCAmelCase_ :List[Any] = scheduler.timesteps
lowerCAmelCase_ :Dict = self.dummy_model()
lowerCAmelCase_ :str = self.dummy_sample_deter
lowerCAmelCase_ :Any = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
lowerCAmelCase_ :Dict = model(__A , __A )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase_ :List[str] = scheduler.step(__A , __A , __A , generator=__A ).prev_sample
lowerCAmelCase_ :str = pred_prev_sample
lowerCAmelCase_ :List[str] = torch.sum(torch.abs(__A ) )
lowerCAmelCase_ :Union[str, Any] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 2_5_2.2_6_8_2_4_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Any = self.scheduler_classes[0]
lowerCAmelCase_ :Dict = self.get_scheduler_config()
lowerCAmelCase_ :int = scheduler_class(**__A )
scheduler.set_timesteps(25 )
lowerCAmelCase_ :Tuple = scheduler.timesteps
lowerCAmelCase_ :List[str] = self.dummy_model()
lowerCAmelCase_ :List[str] = self.dummy_sample_deter
lowerCAmelCase_ :List[str] = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
lowerCAmelCase_ :Optional[Any] = model(__A , __A )
if i + 1 == timesteps.shape[0]:
lowerCAmelCase_ :List[str] = None
else:
lowerCAmelCase_ :Any = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCAmelCase_ :Optional[Any] = scheduler.step(
__A , __A , __A , prev_timestep=__A , generator=__A ).prev_sample
lowerCAmelCase_ :List[Any] = pred_prev_sample
lowerCAmelCase_ :int = torch.sum(torch.abs(__A ) )
lowerCAmelCase_ :Optional[Any] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 2_5_8.2_0_4_4_9_8_3 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def __lowerCAmelCase ( self ) -> int:
pass
def __lowerCAmelCase ( self ) -> str:
pass
| 84 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/levit-128S': 'https://huggingface.co/facebook/levit-128S/resolve/main/config.json',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "levit"
def __init__( self , __A=224 , __A=3 , __A=3 , __A=2 , __A=1 , __A=16 , __A=[128, 256, 384] , __A=[4, 8, 12] , __A=[4, 4, 4] , __A=[16, 16, 16] , __A=0 , __A=[2, 2, 2] , __A=[2, 2, 2] , __A=0.0_2 , **__A , ) -> Any:
super().__init__(**__A )
lowerCAmelCase_ :Tuple = image_size
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :Union[str, Any] = kernel_size
lowerCAmelCase_ :Optional[Any] = stride
lowerCAmelCase_ :Optional[int] = padding
lowerCAmelCase_ :Optional[Any] = hidden_sizes
lowerCAmelCase_ :Optional[int] = num_attention_heads
lowerCAmelCase_ :int = depths
lowerCAmelCase_ :List[str] = key_dim
lowerCAmelCase_ :str = drop_path_rate
lowerCAmelCase_ :Optional[int] = patch_size
lowerCAmelCase_ :Union[str, Any] = attention_ratio
lowerCAmelCase_ :Dict = mlp_ratio
lowerCAmelCase_ :Any = initializer_range
lowerCAmelCase_ :Optional[int] = [
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :Tuple = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-4
| 84 | 1 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( lowercase__ : tuple[int, int] , lowercase__ : int ) -> list[tuple[int, int]]:
'''simple docstring'''
lowerCAmelCase_ , lowerCAmelCase_ :int = position
lowerCAmelCase_ :List[str] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
lowerCAmelCase_ :Optional[int] = []
for position in positions:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(lowercase__ )
return permissible_positions
def _snake_case ( lowercase__ : list[list[int]] ) -> bool:
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def _snake_case ( lowercase__ : list[list[int]] , lowercase__ : tuple[int, int] , lowercase__ : int ) -> bool:
'''simple docstring'''
if is_complete(lowercase__ ):
return True
for position in get_valid_pos(lowercase__ , len(lowercase__ ) ):
lowerCAmelCase_ , lowerCAmelCase_ :int = position
if board[y][x] == 0:
lowerCAmelCase_ :Dict = curr + 1
if open_knight_tour_helper(lowercase__ , lowercase__ , curr + 1 ):
return True
lowerCAmelCase_ :Tuple = 0
return False
def _snake_case ( lowercase__ : int ) -> list[list[int]]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = [[0 for i in range(lowercase__ )] for j in range(lowercase__ )]
for i in range(lowercase__ ):
for j in range(lowercase__ ):
lowerCAmelCase_ :List[str] = 1
if open_knight_tour_helper(lowercase__ , (i, j) , 1 ):
return board
lowerCAmelCase_ :Dict = 0
lowerCAmelCase_ :int = f"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(lowercase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
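# Editorial usage sketch; `open_knight_tour` names the last solver function
# above, which is obfuscated to _snake_case:
# board = open_knight_tour(5)  # 5x5 board; each cell holds its 1-based visit order
# for row in board:
#     print(row)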
| 84 |
"""simple docstring"""
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def _snake_case ( lowercase__ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = FileLock(str(tmpdir / """foo.lock""" ) )
lowerCAmelCase_ :Union[str, Any] = FileLock(str(tmpdir / """foo.lock""" ) )
lowerCAmelCase_ :Dict = 0.01
with locka.acquire():
with pytest.raises(lowercase__ ):
lowerCAmelCase_ :List[Any] = time.time()
locka.acquire(lowercase__ )
assert time.time() - _start > timeout
def _snake_case ( lowercase__ : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = """a""" * 1_0_0_0 + """.lock"""
lowerCAmelCase_ :Optional[Any] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(""".lock""" )
assert not locka._lock_file.endswith(lowercase__ )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
lowerCAmelCase_ :Any = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(lowercase__ ):
locka.acquire(0 )
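# Editorial usage sketch, mirroring the FileLock API exercised above:
# lock = FileLock("resource.lock")
# with lock.acquire(timeout=1):
#     ...  # exclusive section; a concurrent holder makes acquire raise Timeout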
| 84 | 1 |