from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)


class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Handles arguments for zero-shot text classification by turning each candidate label into an NLI hypothesis."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # Tokenizers might raise when asked to truncate to a length the
                # input never reaches; in that case, retry without truncation.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences: Union[str, List[str]], *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
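

# A minimal numpy sketch (illustrative only, not part of the pipeline above) of the two
# scoring modes implemented in `postprocess`; the logits and entailment index are made up.
if __name__ == "__main__":
    # Fake NLI logits for 1 sequence x 3 candidate labels x 3 classes
    # (contradiction, neutral, entailment), i.e. entailment_id = 2.
    fake_logits = np.array([[[0.1, 0.2, 2.0], [1.5, 0.3, 0.2], [0.2, 0.1, 1.0]]])

    # multi_label=True: independent softmax over (contradiction, entailment) per label.
    pair = fake_logits[..., [0, 2]]
    multi_scores = (np.exp(pair) / np.exp(pair).sum(-1, keepdims=True))[..., 1]

    # multi_label=False: one softmax of the entailment logits across all labels.
    entail = fake_logits[..., 2]
    single_scores = np.exp(entail) / np.exp(entail).sum(-1, keepdims=True)

    print(multi_scores)   # each score in [0, 1], independent per label
    print(single_scores)  # scores sum to 1 across the three labels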
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
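

# A small illustrative sketch (not part of the file above) of the special-token layout the
# two methods produce; the toy ids below are made up, real ids come from the trained vocab.
if __name__ == "__main__":
    cls_id, sep_id = 0, 2
    seq_a, seq_b = [10, 11], [20]

    single = [cls_id] + seq_a + [sep_id]                           # <s> A </s>
    pair = [cls_id] + seq_a + [sep_id, sep_id] + seq_b + [sep_id]  # <s> A </s></s> B </s>

    print(single)  # [0, 10, 11, 2]
    print(pair)    # [0, 10, 11, 2, 2, 20, 2] (token type ids are all 0 for BARThez)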
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"
@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")
class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import transformers dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None
class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")
    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        # find_labels works regardless of the class name (it detects the framework through inheritance)
        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
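

# A rough, self-contained sketch (an approximation for illustration, not the actual
# transformers source) of what `find_labels` does for PyTorch models: inspect the forward
# signature and keep the label-like parameters.
def find_labels_sketch(model_class):
    import inspect

    signature = inspect.signature(model_class.forward)
    if "QuestionAnswering" in model_class.__name__:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    return [p for p in signature.parameters if "label" in p]


class FakeForQuestionAnswering:
    def forward(self, input_ids=None, start_positions=None, end_positions=None):
        pass


print(find_labels_sketch(FakeForQuestionAnswering))  # ['start_positions', 'end_positions']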
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
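

# Quick check (illustrative) that _read32 decodes the IDX header's big-endian uint32 fields:
# the 4 bytes 00 00 08 03 are exactly the image-file magic number 2051.
if __name__ == "__main__":
    import io as _io

    print(_read32(_io.BytesIO(b"\x00\x00\x08\x03")))  # 2051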
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
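

# Tiny demo (illustrative, values made up) of the flat-index trick used above: writing 1s
# into the flattened view at offset row*num_classes + label fills the one-hot matrix in one step.
if __name__ == "__main__":
    example_labels = numpy.array([2, 0, 1])
    print(_dense_to_one_hot(example_labels, num_classes=3))
    # [[0. 0. 1.]
    #  [1. 0. 0.]
    #  [0. 1. 0.]]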
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        `dtype` can be either `uint8` to leave the input as `[0, 255]`, or
        `float32` to rescale into `[0, 1]`. The seed arg provides for
        deterministic testing.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
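
    # Illustrative note (toy shapes, not executed): when a batch spans the epoch boundary,
    # `next_batch` stitches the tail of the old ordering to the head of the freshly shuffled one:
    #   ds = _DataSet(images_5x2x2x1, labels_5, reshape=True, seed=0)
    #   ds.next_batch(3)  # examples 0-2 of the shuffled order
    #   ds.next_batch(3)  # last 2 of this epoch + first 1 of the next; epochs_completed -> 1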
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
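

# Minimal usage sketch (the directory path is arbitrary): downloads the four MNIST archives
# on first use, then yields shuffled (batch_images, batch_labels) pairs.
if __name__ == "__main__":
    mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
    batch_xs, batch_ys = mnist.train.next_batch(100)
    print(batch_xs.shape, batch_ys.shape)  # (100, 784) (100, 10)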
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_save_architecture(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
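

# Minimal usage sketch (requires torch + timm installed; the checkpoint name is just an example):
# load a timm backbone through the transformers AutoBackbone API and grab multi-scale features.
if __name__ == "__main__":
    backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=[1, 2, 3])
    features = backbone(torch.randn(1, 3, 224, 224)).feature_maps
    print([f.shape for f in features])  # one feature map per requested stage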
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
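

# Small numpy illustration (not part of the test file) of what reduce_labels does:
# the background class 0 becomes the ignore index 255 and real classes shift down by one,
# which is why the max label can be 255 after reduction.
if __name__ == "__main__":
    seg = np.array([[0, 1], [150, 2]])
    reduced = seg.copy()
    reduced[reduced == 0] = 255   # background -> ignore index
    reduced[reduced != 255] -= 1  # shift remaining classes down
    print(reduced)  # [[255, 0], [149, 1]]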
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py')
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py')
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py')
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py')
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py')
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py')
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py')
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py')
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py')
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )
        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)

        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
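

# Quick usage sketch (illustrative): `from_list` keys the schema off the first record, so a
# column that first appears only in a later record is silently dropped.
if __name__ == "__main__":
    ds = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    print(ds.column_names)  # ['col_1']
    print(ds[1])            # {'col_1': None}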
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)

    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())
    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class _A ( unittest.TestCase ):
    tpu_name = '''test-tpu'''
    tpu_zone = '''us-central1-a'''
    command = '''ls'''
    cmd = ['''accelerate''', '''tpu-config''']
    base_output = '''cd /usr/share'''
    command_file = '''tests/test_samples/test_command_file.sh'''
    gcloud = '''Running gcloud compute tpus tpu-vm ssh'''
    def test_base( self : Any):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , output , )
    def test_base_backward_compatibility( self : List[Any]):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command''',
                self.command,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , output , )
    def test_with_config_file( self : List[str]):
        '''simple docstring'''
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=True)
        self.assertIn(
            F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , output , )
    def test_with_config_file_and_command( self : Dict):
        '''simple docstring'''
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , output , )
    def test_with_config_file_and_multiple_command( self : Optional[Any]):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--command''',
                self.command,
                '''--command''',
                '''echo "Hello World"''',
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , output , )
    def test_with_config_file_and_command_file( self : Tuple):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=True , )
        self.assertIn(
            F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , output , )
    def test_with_config_file_and_command_file_backward_compatibility( self : List[Any]):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/0_12_0.yaml''',
                '''--command_file''',
                self.command_file,
                '''--tpu_zone''',
                self.tpu_zone,
                '''--tpu_name''',
                self.tpu_name,
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , output , )
    def test_accelerate_install( self : Optional[int]):
        '''simple docstring'''
        output = run_command(
            self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=True , )
        self.assertIn(
            F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , output , )
    def test_accelerate_install_version( self : Dict):
        '''simple docstring'''
        output = run_command(
            self.cmd
            + [
                '''--config_file''',
                '''tests/test_configs/latest.yaml''',
                '''--install_accelerate''',
                '''--accelerate_version''',
                '''12.0.0''',
                '''--debug''',
            ] , return_stdout=True , )
        self.assertIn(
            F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , output , )
| 60 |
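A hedged sketch of the command-building pattern the launch test relies on: the `--multi_gpu` flag is appended only when more than one CUDA device is visible (the script path is a placeholder):

import torch

cmd = ["accelerate", "launch"]
if torch.cuda.is_available() and torch.cuda.device_count() > 1:
    cmd += ["--multi_gpu"]
cmd += ["train.py"]  # hypothetical training script path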
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention( idx , cnt ):
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token( idx ):
    token = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def final( ):
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu''' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__snake_case :str = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
        help='''Path to the original CvT checkpoint file''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__snake_case :Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 60 | 1 |
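The conversion above boils down to renaming checkpoint keys through (new_key, old_key) pairs; a toy sketch of that core step, with hypothetical key names:

from collections import OrderedDict

rename_pairs = [("model.proj.weight", "stage0.proj.weight")]  # hypothetical names
original = {"stage0.proj.weight": 1.0}
converted = OrderedDict((new, original[old]) for new, old in rename_pairs)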
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__snake_case :Tuple = logging.get_logger(__name__)
class HyperParamSearchBackendBase :
    name : str
    pip_package : str = None
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        raise NotImplementedError
    def run( self : Any , trainer : Any , n_trials : int , direction : str , **kwargs : int):
        '''simple docstring'''
        raise NotImplementedError
    def default_hp_space( self : List[Any] , trial : List[str]):
        '''simple docstring'''
        raise NotImplementedError
    def ensure_available( self : List[str]):
        '''simple docstring'''
        if not self.is_available():
            raise RuntimeError(
                F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.')
    @classmethod
    def pip_install( cls : int):
        '''simple docstring'''
        return F'`pip install {cls.pip_package or cls.name}`'
class OptunaBackend ( HyperParamSearchBackendBase ):
    name = '''optuna'''
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_optuna_available()
    def run( self : Optional[Any] , trainer : str , n_trials : int , direction : str , **kwargs : List[Any]):
        '''simple docstring'''
        return run_hp_search_optuna(trainer , n_trials , direction , **kwargs)
    def default_hp_space( self : int , trial : Optional[Any]):
        '''simple docstring'''
        return default_hp_space_optuna(trial)
class RayTuneBackend ( HyperParamSearchBackendBase ):
    name = '''ray'''
    pip_package = '''\'ray[tune]\''''
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_ray_available()
    def run( self : Dict , trainer : List[str] , n_trials : int , direction : str , **kwargs : Any):
        '''simple docstring'''
        return run_hp_search_ray(trainer , n_trials , direction , **kwargs)
    def default_hp_space( self : List[str] , trial : Tuple):
        '''simple docstring'''
        return default_hp_space_ray(trial)
class SigOptBackend ( HyperParamSearchBackendBase ):
    name = '''sigopt'''
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_sigopt_available()
    def run( self : Dict , trainer : Tuple , n_trials : int , direction : str , **kwargs : Dict):
        '''simple docstring'''
        return run_hp_search_sigopt(trainer , n_trials , direction , **kwargs)
    def default_hp_space( self : Tuple , trial : Optional[int]):
        '''simple docstring'''
        return default_hp_space_sigopt(trial)
class WandbBackend ( HyperParamSearchBackendBase ):
    name = '''wandb'''
    @staticmethod
    def is_available( ):
        '''simple docstring'''
        return is_wandb_available()
    def run( self : str , trainer : Tuple , n_trials : int , direction : str , **kwargs : int):
        '''simple docstring'''
        return run_hp_search_wandb(trainer , n_trials , direction , **kwargs)
    def default_hp_space( self : Dict , trial : List[Any]):
        '''simple docstring'''
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend( ):
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends ) > 0:
        name = available_backends[0].name
        if len(available_backends ) > 1:
            logger.info(
                f'{len(available_backends )} hyperparameter search backends available. Using {name} as the default.' )
        return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
f' - To install {backend.name} run {backend.pip_install()}'
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 60 |
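A minimal sketch of the registry-plus-availability pattern implemented above, using a toy backend instead of the real integrations:

class ToyBackend:
    name = "toy"
    @staticmethod
    def is_available():
        return True

BACKENDS = {b.name: b for b in [ToyBackend]}
available = [b for b in BACKENDS.values() if b.is_available()]
default = available[0].name if available else None  # 'toy'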
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class _A ( DiffusionPipeline ):
    def __init__( self : Any , vqvae : VQModel , unet : UNet2DModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler)
@torch.no_grad()
    def __call__( self : List[Any] , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        '''simple docstring'''
        if isinstance(image , PIL.Image.Image):
            batch_size = 1
        elif isinstance(image , torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}')
        if isinstance(image , PIL.Image.Image):
            image = preprocess(image)
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype)
        image = image.to(device=self.device , dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input , t)
            # predict the noise residual
            noise_pred = self.unet(latents_input , t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image , -1.0 , 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 60 | 1 |
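A usage sketch for a pipeline of this shape; the public class and checkpoint names are assumptions (any LDM super-resolution checkpoint exposing vqvae/unet/scheduler components should behave the same):

import PIL.Image
from diffusers import LDMSuperResolutionPipeline  # assumed public export

pipe = LDMSuperResolutionPipeline.from_pretrained(
    "CompVis/ldm-super-resolution-4x-openimages"  # assumed checkpoint id
)
low_res = PIL.Image.open("low_res.png").convert("RGB")
upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")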
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
__snake_case :Dict = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
__snake_case :str = parser.parse_args()
if args.model_type == "roberta":
__snake_case :List[str] = RobertaForMaskedLM.from_pretrained(args.model_name)
__snake_case :Dict = '''roberta'''
elif args.model_type == "gpt2":
__snake_case :Dict = GPTaLMHeadModel.from_pretrained(args.model_name)
__snake_case :str = '''transformer'''
__snake_case :str = model.state_dict()
__snake_case :List[str] = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__snake_case :str = state_dict[f'{prefix}.{param_name}']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__snake_case :Optional[int] = f'{prefix}.embeddings.{w}.weight'
__snake_case :List[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
__snake_case :Tuple = f'{prefix}.embeddings.LayerNorm.{w}'
__snake_case :Optional[Any] = state_dict[param_name]
# Transformer Blocks #
__snake_case :int = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__snake_case :Tuple = state_dict[
f'{prefix}.h.{teacher_idx}.{layer}.{w}'
]
__snake_case :Any = state_dict[f'{prefix}.h.{teacher_idx}.attn.bias']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__snake_case :List[str] = state_dict[
f'{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__snake_case :str = state_dict[f'{layer}']
if args.vocab_transform:
for w in ["weight", "bias"]:
__snake_case :int = state_dict[f'lm_head.dense.{w}']
__snake_case :Tuple = state_dict[f'lm_head.layer_norm.{w}']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__snake_case :List[str] = state_dict[f'{prefix}.ln_f.{w}']
__snake_case :List[Any] = state_dict['''lm_head.weight''']
print(f'N layers selected for distillation: {std_idx}')
print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
torch.save(compressed_sd, args.dump_checkpoint)
| 60 |
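At its core, the extraction script is a key-filtered copy of a state dict; a toy sketch of the same idea with made-up weights:

teacher_sd = {"h.0.w": 1, "h.1.w": 2, "h.2.w": 3}  # toy teacher weights
keep = [0, 2]  # teacher layers to transfer
student_sd = {f"h.{i}.w": teacher_sd[f"h.{t}.w"] for i, t in enumerate(keep)}
# student layer 0 <- teacher layer 0, student layer 1 <- teacher layer 2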
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__snake_case :Any = TypeVar('''KT''')
__snake_case :List[str] = TypeVar('''VT''')
class Node ( Generic[KT, VT] ):
    def __init__( self : Dict , key : KT | str = "root" , value : VT | None = None):
        '''simple docstring'''
        self.key = key
        self.value = value
        self.forward = []
def __repr__( self : Dict):
'''simple docstring'''
return F'Node({self.key}: {self.value})'
    @property
    def level( self : Tuple):
        '''simple docstring'''
        return len(self.forward)
class SkipList ( Generic[KT, VT] ):
    def __init__( self : Union[str, Any] , p : float = 0.5 , max_level : int = 16):
        '''simple docstring'''
        self.head = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
    def __str__( self : Union[str, Any]):
        '''simple docstring'''
        items = list(self)
        if len(items) == 0:
            return F'SkipList(level={self.level})'
        label_size = max((len(str(item)) for item in items) , default=4)
        label_size = max(label_size , 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'[{node.key}]'.ljust(label_size , '''-''') + '''* ''' * len(forwards))
        lines.append(''' ''' * label_size + '''| ''' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                F'[{node.key}]'.ljust(label_size , '''-''')
                + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
            lines.append(''' ''' * label_size + '''| ''' * len(forwards))
            forwards = node.forward
        lines.append('''None'''.ljust(label_size) + '''* ''' * len(forwards))
        return F'SkipList(level={self.level})\n' + "\n".join(lines)
    def __iter__( self : int):
        '''simple docstring'''
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level( self : List[Any]):
        '''simple docstring'''
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level
    def _locate_node( self : Tuple , key : KT):
        '''simple docstring'''
        update_vector = []
        node = self.head
for i in reversed(range(self.level)):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
            update_vector.append(node)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
    def delete( self : Dict , key : KT):
        '''simple docstring'''
        node , update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self : List[str] , key : KT , value : VT):
        '''simple docstring'''
        node , update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key , value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find( self : Union[str, Any] , key : KT):
        '''simple docstring'''
        node , _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert( ):
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 3 )
    skip_list.insert('''Key2''' , 12 )
    skip_list.insert('''Key3''' , 41 )
    skip_list.insert('''Key4''' , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def test_insert_overrides_existing_value( ):
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 10 )
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''Key5''' , 7 )
    skip_list.insert('''Key7''' , 10 )
    skip_list.insert('''Key10''' , 5 )
    skip_list.insert('''Key7''' , 7 )
    skip_list.insert('''Key5''' , 5 )
    skip_list.insert('''Key10''' , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none( ):
    skip_list = SkipList()
assert skip_list.find('''Some key''' ) is None
def test_search( ):
    skip_list = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def test_deleting_item_from_empty_list_do_nothing( ):
    skip_list = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method( ):
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def test_delete_removes_only_given_key( ):
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def test_delete_doesnt_leave_dead_nodes( ):
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values( ):
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def pytests( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main( ):
    skip_list = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 60 | 1 |
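A quick sketch of the SkipList API exercised by the tests above:

sl = SkipList()
sl.insert("a", 1)
sl.insert("b", 2)
assert sl.find("a") == 1
sl.delete("a")
assert sl.find("a") is None
print(list(sl))  # keys in sorted order, here ['b']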
import math
import sys
def read_file_binary( file_path ):
    result = ''''''
    try:
        with open(file_path , '''rb''' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def decompress_data( data_bits ):
    lexicon = {'''0''': '''0''', '''1''': '''1'''}
    result , curr_string = '''''', ''''''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '''0'''
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['''0''' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '''1'''
        index += 1
        curr_string = ''''''
    return result
def write_file_binary( file_path , to_write ):
    byte_length = 8
    try:
        with open(file_path , '''wb''' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('''10000000''' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='''big''' ) )
    except OSError:
        print('''File not accessible''' )
        sys.exit()
def remove_prefix( data_bits ):
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress( source_path , destination_path ):
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 60 |
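Despite its name, compress here performs decompression: it reads an LZW bitstream, strips the length prefix, decodes it, and writes the restored bytes. A call sketch with placeholder paths:

compress("example.lzw", "example_restored.bin")  # placeholder file names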
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs( graph , s , t , parent ):
    # Return True if there is node that has not iterated.
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut( graph , source , sink ):
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float('''Inf''' )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 60 | 1 |
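A usage sketch; mincut mutates the residual graph in place, so pass a copy if you want to keep the original capacities (the expected edge list below is what this classic capacity matrix should yield, shown as an illustration):

graph = [row[:] for row in test_graph]
print(mincut(graph, source=0, sink=5))  # e.g. [(1, 3), (4, 3), (4, 5)]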
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__snake_case :Tuple = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _A ( Pipeline ):
def __init__( self : Tuple , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Union[str, List[str], "Image", List["Image"]] , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
return super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
    def _sanitize_parameters( self : Union[str, Any] , **kwargs : Any):
        '''simple docstring'''
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess( self : Tuple , image , candidate_labels=None , hypothesis_template="This is a photo of {}."):
        '''simple docstring'''
        image = load_image(image)
        inputs = self.image_processor(images=[image] , return_tensors=self.framework)
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True)
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
    def _forward( self : Optional[int] , model_inputs : Dict):
        '''simple docstring'''
        candidate_labels = model_inputs.pop('''candidate_labels''')
        text_inputs = model_inputs.pop('''text_inputs''')
        if isinstance(text_inputs[0] , UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs)
        model_outputs = {
            '''candidate_labels''': candidate_labels,
            '''logits''': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess( self : Union[str, Any] , model_outputs : Union[str, Any]):
        '''simple docstring'''
        candidate_labels = model_outputs.pop('''candidate_labels''')
        logits = model_outputs['''logits'''][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores , list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')
        result = [
            {'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels) , key=lambda x: -x[0])
        ]
        return result
| 60 |
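A usage sketch via the high-level pipeline factory; the model id is an assumption — any CLIP-style zero-shot checkpoint works:

from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",  # assumed checkpoint
)
preds = classifier("cat.png", candidate_labels=["cat", "dog", "car"])
print(preds[0]["label"], preds[0]["score"])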
from __future__ import annotations
def print_distance( distance , src ):
    print(f'Vertex\tShortest Distance from vertex {src}' )
    for i, d in enumerate(distance ):
        print(f'{i}\t\t{d}' )
def check_negative_cycle( graph , distance , edge_count ):
    for j in range(edge_count ):
        u , v , w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
        if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford( graph , vertex_count , edge_count , src ):
    distance = [float('''inf''' )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u , v , w = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
            if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph , distance , edge_count )
    if negative_cycle_exists:
        raise Exception('''Negative cycle found''' )
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input('''Enter number of vertices: ''').strip())
    E = int(input('''Enter number of edges: ''').strip())
    graph: list[dict[str, int]] = [{} for _ in range(E)]
    for i in range(E):
        print('''Edge ''', i + 1)
        src, dest, weight = (
            int(x)
            for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
        )
        graph[i] = {'''src''': src, '''dst''': dest, '''weight''': weight}
    source = int(input('''\nEnter shortest path source:''').strip())
    shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 60 | 1 |
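A self-contained sketch calling bellman_ford on a tiny edge list (edges are dicts with src/dst/weight keys, matching the interactive block above):

edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
print(bellman_ford(edges, 3, len(edges), 0))  # [0.0, 3.0, 1.0]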
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case :List[Any] = logging.get_logger(__name__)
__snake_case :str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
__snake_case :List[str] = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
__snake_case :int = {
'''allenai/longformer-base-4096''': 4096,
'''allenai/longformer-large-4096''': 4096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode( ):
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _A ( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        '''simple docstring'''
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False) if isinstance(bos_token , str) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False) if isinstance(eos_token , str) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False) if isinstance(sep_token , str) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False) if isinstance(cls_token , str) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False) if isinstance(unk_token , str) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False) if isinstance(pad_token , str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False) if isinstance(mask_token , str) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')
    @property
    def vocab_size( self : List[Any]):
        '''simple docstring'''
        return len(self.encoder)
    def get_vocab( self : Optional[Any]):
        '''simple docstring'''
        return dict(self.encoder , **self.added_tokens_encoder)
    def bpe( self : Dict , token : Optional[Any]):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first , i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word
    def _tokenize( self : str , text : Optional[Any]):
        '''simple docstring'''
        bpe_tokens = []
        for token in re.findall(self.pat , text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8'''))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens
    def _convert_token_to_id( self : Optional[int] , token : Optional[Any]):
        '''simple docstring'''
        return self.encoder.get(token , self.encoder.get(self.unk_token))
    def _convert_id_to_token( self : Optional[Any] , index : List[str]):
        '''simple docstring'''
        return self.decoder.get(index)
    def convert_tokens_to_string( self : List[Any] , tokens : List[str]):
        '''simple docstring'''
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''' , errors=self.errors)
        return text
    def save_vocabulary( self : Tuple , save_directory : str , filename_prefix : Optional[str] = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file , '''w''' , encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self : Dict , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask( self : Union[str, Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False):
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization( self : Any , text , is_split_into_words=False , **kwargs):
        '''simple docstring'''
        add_prefix_space = kwargs.pop('''add_prefix_space''' , self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 60 |
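A tiny sketch of the two module-level BPE helpers defined above:

word = tuple("hello")
print(get_pairs(word))     # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}
byte_map = bytes_to_unicode()
print(byte_map[ord(" ")])  # 'Ġ' — the printable stand-in for a space byte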
import os
import sys
import unittest
__snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''')
__snake_case :Any = '''
{0} = None
'''
__snake_case :Dict = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
__snake_case :str = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class _A ( unittest.TestCase ):
    def test_find_backend( self : List[Any]):
        '''simple docstring'''
        no_backend = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
        self.assertIsNone(no_backend)
        simple_backend = find_backend(''' if not is_tokenizers_available():''')
        self.assertEqual(simple_backend , '''tokenizers''')
        backend_with_underscore = find_backend(''' if not is_tensorflow_text_available():''')
        self.assertEqual(backend_with_underscore , '''tensorflow_text''')
        double_backend = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
        self.assertEqual(double_backend , '''sentencepiece_and_tokenizers''')
        double_backend_with_underscore = find_backend(
            ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
        self.assertEqual(double_backend_with_underscore , '''sentencepiece_and_tensorflow_text''')
        triple_backend = find_backend(
            ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
        self.assertEqual(triple_backend , '''sentencepiece_and_tokenizers_and_vision''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''')
__a = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
__SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
__a = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__a = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
| 60 | 1 |
from __future__ import annotations
import requests
def get_hackernews_story( story_id ):
    url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url ).json()
def hackernews_top_stories( max_stories = 10 ):
    url = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]
def hackernews_top_stories_as_markdown( max_stories = 10 ):
    stories = hackernews_top_stories(max_stories )
    return "\n".join('''* [{title}]({url})'''.format(**story ) for story in stories )
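# Usage sketch (hedged: assumes the Hacker News Firebase API documented at
# https://github.com/HackerNews/API, where each item JSON carries "title" and "url"):
#
#   >>> print(hackernews_top_stories_as_markdown(2))  # doctest: +SKIP
#   * [Some story title](https://example.com/story)
#   * [Another story title](https://example.com/other)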
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 60 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__snake_case :str = get_logger()
__snake_case :Optional[dict] = None
class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
super().__init__(features=__SCREAMING_SNAKE_CASE)
import jax
from jaxlib.xla_client import Device
        if isinstance(__SCREAMING_SNAKE_CASE , Device):
raise ValueError(
F'Expected {device} to be a `str` not {type(__SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` '
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''')
        __a = device if isinstance(__SCREAMING_SNAKE_CASE , str) else str(jax.devices()[0])
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__a = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
F'device: {str(jax.devices()[0])}.')
__a = str(jax.devices()[0])
__a = jnp_array_kwargs
@staticmethod
def _lowerCamelCase ( ):
'''simple docstring'''
import jax
        return {str(device): device for device in jax.devices()}
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
import jax
import jax.numpy as jnp
        if isinstance(__SCREAMING_SNAKE_CASE , list) and column:
            if all(
                isinstance(x , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0)
return column
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
import jax
import jax.numpy as jnp
        if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(None))):
return value
elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
__a = {}
if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                __a = {'''dtype''': jnp.int64}
            else:
                __a = {'''dtype''': jnp.int32}
        elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
            __a = {'''dtype''': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = np.asarray(__SCREAMING_SNAKE_CASE)
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__a = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs})
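    # Standalone sketch of the dtype selection above (hedged: relies only on JAX's
    # public API, where `jax.config.jax_enable_x64` toggles 64-bit mode):
    #
    #   import jax, jax.numpy as jnp
    #   int_dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
    #   arr = jnp.array([1, 2, 3], dtype=int_dtype)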
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''') and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array):
__a = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
return self._tensorize(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : dict):
'''simple docstring'''
return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
__a = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE)
__a = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE)
return self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
__a = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE)
__a = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0])
__a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
__a = self._consolidate(__SCREAMING_SNAKE_CASE)
return column
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
__a = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE)
__a = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE)
__a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
for column_name in batch:
__a = self._consolidate(batch[column_name])
return batch
| 60 | 1 |
import argparse
import os
import re
PATH_TO_AUTO_MODULE = '''src/transformers/models/auto'''
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r'''[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict''')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'''\s*\(\s*"(\S[^"]+)"''')
def sort_auto_mapping( fname , overwrite = False ):
    with open(fname , '''r''' , encoding='''utf-8''' ) as f:
        content = f.read()
    lines = content.split('''\n''' )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(R'''^(\s*)\S''' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(''' ''' * indent + '''(''' ):
new_lines.append(lines[line_idx] )
line_idx += 1
            blocks = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
                    start_idx = line_idx
while not lines[line_idx].startswith(''' ''' * indent + ''')''' ):
line_idx += 1
blocks.append('''\n'''.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda block : _re_identifier.search(block ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
    if overwrite:
        with open(fname , '''w''' , encoding='''utf-8''' ) as f:
            f.write('''\n'''.join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True
def sort_all_auto_mappings( overwrite = False ):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith('''.py''' )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]
    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            f'The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'
            ''' this.''' )
if __name__ == "__main__":
__snake_case :str = argparse.ArgumentParser()
parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''')
__snake_case :Dict = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
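# Usage sketch (hedged: assumes the script lives in the repo's utils/ directory):
#
#   python utils/sort_auto_mappings.py              # rewrite the auto mappings in place
#   python utils/sort_auto_mappings.py --check_only # raise if any mapping is unsorted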
| 60 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :Tuple = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0522, type=int)
    args = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
for tk_ids in data:
counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
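    # Shape sketch (hedged): counts[token_id] holds the corpus frequency of that id.
    # For example, vocab_size=4 with data [[0, 1, 1], [1, 3]] dumps [1, 3, 0, 1].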
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_roc_bert'''] = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
__snake_case :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
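# Import-behaviour sketch (hedged): with the _LazyModule indirection above, a plain
#   from transformers.models.roc_bert import RoCBertModel
# resolves the torch-backed module only when the attribute is first accessed.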
| 60 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__snake_case :List[str] = HfApi()
results = {}
# fmt: off
__snake_case :Optional[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__snake_case :Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__snake_case :str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__snake_case :List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__snake_case :Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__snake_case :List[str] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__snake_case :Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__snake_case :Tuple = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__snake_case :List[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__snake_case :Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__snake_case :List[str] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__snake_case :Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__snake_case :List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__snake_case :Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
__snake_case :List[Any] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
            model = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
            model = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
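        # Shape sketch (hedged): `logits` is (1, out_channels, sample_size, sample_size);
        # the assert compares the first 30 values of logits[0, 0, 0] against the reference
        # tensor keyed by the modelId with "/" and "-" both mapped to "_".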
print(f'{mod.modelId} has passed successfully!!!')
| 60 | 1 |
def is_balanced( s ):
    stack = []
    open_brackets = set({'''(''', '''[''', '''{'''} )
    closed_brackets = set({''')''', ''']''', '''}'''} )
    open_to_closed = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
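# Quick checks (hedged sketch of the expected behaviour):
#
#   >>> is_balanced("{[()]}")
#   True
#   >>> is_balanced("{[(])}")
#   False
#   >>> is_balanced("")
#   True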
def main( ):
    s = input('''Enter sequence of brackets: ''' )
    if is_balanced(s ):
        print(s , '''is balanced''' )
    else:
        print(s , '''is not balanced''' )
if __name__ == "__main__":
main()
| 60 |
from collections.abc import Generator
from math import sin
def to_little_endian( string_aa ):
    if len(string_aa ) != 32:
        raise ValueError('''Input must be of length 32''' )
    little_endian = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def reformat_hex( i ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
    hex_rep = format(i , '''08x''' )[-8:]
    little_endian_hex = b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def preprocess( message ):
    bit_string = b''''''
    for char in message:
        bit_string += format(char , '''08b''' ).encode('''utf-8''' )
    start_len = format(len(bit_string ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
    while len(bit_string ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def get_block_words( bit_string ):
    if len(bit_string ) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''' )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def not_aa( i ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    i_str = format(i , '''032b''' )
    new_str = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_UpperCAmelCase , 2 )
def sum_aa( a , b ):
    return (a + b) % 2**32
def left_rotate_aa( i , shift ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
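# Worked example (hedged): left_rotate_aa(1, 1) == 2, and rotating 0x80000000 by 1
# wraps the high bit around to give 1, since the result is reduced modulo 2**32.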
def __snake_case ( _UpperCAmelCase ):
    bit_string = preprocess(_UpperCAmelCase )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0X67_452_301
    ba = 0Xef_cda_b89
    ca = 0X98_bad_cfe
    da = 0X10_325_476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
# Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
# Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
return digest
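# Known-answer sketch (hedged: the digest function above keeps its placeholder name;
# RFC 1321 test vectors for reference):
#   b""    -> "d41d8cd98f00b204e9800998ecf8427e"
#   b"abc" -> "900150983cd24fb0d6963f7d28e17f72"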
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 | 1 |
def __snake_case ( n , k ):
    factorials = [1]
    for i in range(2 , n ):
        factorials.append(factorials[-1] * i )
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n ) )
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number , k = divmod(k , factorial )
        permutation.append(elements[number] )
        elements.remove(elements[number] )
    permutation.append(elements[0] )
    return permutation
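# Worked example (hedged): with n=3 and k=2 the loop pops factorial 2, takes
# divmod(2, 2) = (1, 0), so the permutation of [0, 1, 2] comes out as [1, 0, 2].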
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
ZERO2 = '''zero2'''
ZERO3 = '''zero3'''
stages = [ZERO2, ZERO3]
def __snake_case ( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
__snake_case :List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _A ( __UpperCAmelCase ):
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = models[model]
__a = self.run_trainer(
stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
self.do_checks(__SCREAMING_SNAKE_CASE)
return output_dir
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE)
__a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__a = self.get_launcher(__SCREAMING_SNAKE_CASE)
__a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env())
return output_dir
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False):
'''simple docstring'''
__a = min(2 , get_gpu_count()) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
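    # Launcher sketch (hedged): with two visible GPUs and distributed=True the method
    # above returns the argv list for
    #   deepspeed --num_nodes 1 --num_gpus 2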
| 60 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__snake_case :Union[str, Any] = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Any = VOCAB_FILES_NAMES
UpperCamelCase__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : str = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : Optional[Any] = None
def __init__( self : int , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[Any]="<unk>" , __SCREAMING_SNAKE_CASE : List[str]="<s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="</s>" , __SCREAMING_SNAKE_CASE : Dict="<pad>" , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
super().__init__(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get('''add_prefix_space''' , __SCREAMING_SNAKE_CASE) != add_prefix_space:
__a = getattr(__SCREAMING_SNAKE_CASE , pre_tok_state.pop('''type'''))
__a = add_prefix_space
__a = pre_tok_class(**__SCREAMING_SNAKE_CASE)
__a = add_prefix_space
def _lowerCamelCase ( self : Dict , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
''' pretokenized inputs.''')
return super()._batch_encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , *__SCREAMING_SNAKE_CASE : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = kwargs.get('''is_split_into_words''' , __SCREAMING_SNAKE_CASE)
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
''' pretokenized inputs.''')
return super()._encode_plus(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
__a = self._tokenizer.model.save(__SCREAMING_SNAKE_CASE , name=__SCREAMING_SNAKE_CASE)
return tuple(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : "Conversation"):
'''simple docstring'''
__a = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE) + [self.eos_token_id])
if len(__SCREAMING_SNAKE_CASE) > self.model_max_length:
__a = input_ids[-self.model_max_length :]
return input_ids
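    # Layout sketch (hedged): every conversation turn is encoded and terminated with
    # the EOS token, e.g. [turn_1_ids..., eos_id, turn_2_ids..., eos_id], and only the
    # trailing model_max_length ids survive the truncation above.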
| 60 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = False ):
    if not isinstance(_UpperCAmelCase , str ):
__a = f'Expected string as input, found {type(_UpperCAmelCase )}'
raise ValueError(_UpperCAmelCase )
    if not isinstance(_UpperCAmelCase , bool ):
__a = f'Expected boolean as use_pascal parameter, found {type(_UpperCAmelCase )}'
raise ValueError(_UpperCAmelCase )
__a = input_str.split('''_''' )
__a = 0 if use_pascal else 1
__a = words[start_index:]
__a = [word[0].upper() + word[1:] for word in words_to_capitalize]
__a = '''''' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
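# Quick checks (hedged sketch of the intended behaviour):
#   ("some_random_string", use_pascal=False) -> "someRandomString"
#   ("some_random_string", use_pascal=True)  -> "SomeRandomString"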
if __name__ == "__main__":
from doctest import testmod
testmod()
| 60 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __snake_case ( _UpperCAmelCase ):
    d = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(_UpperCAmelCase ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
        determinant = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(_UpperCAmelCase ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
# Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
# Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
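# Worked 2x2 example (hedged): for [[2.0, 0.0], [0.0, 2.0]] the determinant is 4, the
# swapped/negated entries are [[2.0, -0.0], [-0.0, 2.0]], and dividing by the
# determinant (with `or 0.0` normalising -0.0) yields [[0.5, 0.0], [0.0, 0.5]].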
| 60 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class _A :
UpperCamelCase__ : str
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[Union[str, int]] = None
UpperCamelCase__ : Optional[Union[str, int]] = None
UpperCamelCase__ : Optional[Union[str, int]] = None
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
        self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str)
def __repr__( self : Tuple):
'''simple docstring'''
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return self.major, self.minor, self.patch
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
        if isinstance(__SCREAMING_SNAKE_CASE , str):
            return Version(__SCREAMING_SNAKE_CASE)
        elif isinstance(__SCREAMING_SNAKE_CASE , Version):
return other
raise TypeError(F'{other} (type {type(__SCREAMING_SNAKE_CASE)}) cannot be compared to version.')
def __eq__( self : int , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
try:
            other = self._validate_operand(__SCREAMING_SNAKE_CASE)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : str , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
        other = self._validate_operand(__SCREAMING_SNAKE_CASE)
return self.tuple < other.tuple
def __hash__( self : Optional[Any]):
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
        field_names = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _lowerCamelCase ( self : int):
'''simple docstring'''
return self.version_str
def _str_to_version_tuple( version_str ):
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
    return tuple(int(v ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def _version_tuple_to_str( version_tuple ):
    return ".".join(str(v ) for v in version_tuple )
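# Round-trip sketch (hedged):
#   _str_to_version_tuple("1.2.3")   -> (1, 2, 3)
#   _version_tuple_to_str((1, 2, 3)) -> "1.2.3"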
| 60 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = 0
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
__a = Path(__SCREAMING_SNAKE_CASE) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w'''))
__a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
__a = Path(__SCREAMING_SNAKE_CASE) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w'''))
__a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__a = CLIPConfig()
# Create a dummy config file with image_proceesor_type
__a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
__a = Path(__SCREAMING_SNAKE_CASE) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w'''))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE).to_dict()
config_dict.pop('''image_processor_type''')
__a = CLIPImageProcessor(**__SCREAMING_SNAKE_CASE)
# save in new folder
model_config.save_pretrained(__SCREAMING_SNAKE_CASE)
config.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
# make sure private variable is not incorrectly saved
__a = json.loads(config.to_json_string())
self.assertTrue('''_processor_class''' not in dict_as_saved)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
__a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
__a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''clip-base is not a local folder and is not a valid model identifier'''):
__a = AutoImageProcessor.from_pretrained('''clip-base''')
def _lowerCamelCase ( self : int):
'''simple docstring'''
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
__a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , revision='''aaaaaa''')
def _lowerCamelCase ( self : int):
'''simple docstring'''
with self.assertRaisesRegex(
__SCREAMING_SNAKE_CASE , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
__a = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''')
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
with self.assertRaises(__SCREAMING_SNAKE_CASE):
__a = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
# If remote code is disabled, we can't load this config.
with self.assertRaises(__SCREAMING_SNAKE_CASE):
__a = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE)
__a = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE , trust_remote_code=__SCREAMING_SNAKE_CASE)
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''')
def _lowerCamelCase ( self : Any):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE)
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__SCREAMING_SNAKE_CASE):
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
with tempfile.TemporaryDirectory() as tmpdirname:
__a = Path(__SCREAMING_SNAKE_CASE) / '''preprocessor_config.json'''
__a = Path(__SCREAMING_SNAKE_CASE) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__SCREAMING_SNAKE_CASE , '''w''') , )
json.dump({'''model_type''': '''clip'''} , open(__SCREAMING_SNAKE_CASE , '''w'''))
__a = CustomImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = AutoImageProcessor.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Dict = True
try:
AutoConfig.register('''custom''' , __SCREAMING_SNAKE_CASE)
AutoImageProcessor.register(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# If remote code is not set, the default is to use local
__a = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
__a = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
__a = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''')
self.assertTrue(not hasattr(__SCREAMING_SNAKE_CASE , '''is_local'''))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 60 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__snake_case :int = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class _A ( tr.AbstractTransform ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "):
'''simple docstring'''
__a = sentence_delimiter
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return list(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = []
for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE):
chars.extend(self.process_string(__SCREAMING_SNAKE_CASE))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1:
chars.append(self.sentence_delimiter)
return chars
__snake_case :Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__snake_case :Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__snake_case :Optional[int] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__snake_case :Tuple = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__snake_case :Tuple = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"]
__a = 0
__a = 0
for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
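        # Worked example (hedged, matching the docstring above): the two sentence pairs
        # contribute 14 substitutions/deletions/insertions over 41 reference characters,
        # and 14 / 41 == 0.34146341463414637.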
| 60 | 1 |
def __snake_case ( grid ):
    if not grid or not grid[0]:
        raise TypeError('''The grid does not contain the appropriate information''' )
    for cell_n in range(1 , len(grid[0] ) ):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1 , len(grid ) ):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row , row_above )
        row_above = grid[row_n]
    return grid[-1][-1]
def fill_row( current_row , row_above ):
    current_row[0] += row_above[0]
    for cell_n in range(1 , len(current_row ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
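# Worked example (hedged): for [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest
# top-left to bottom-right path is 1 -> 3 -> 1 -> 1 -> 1, so the result is 7.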
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case :Union[str, Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = ['''ViTFeatureExtractor''']
__snake_case :Optional[Any] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :str = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__snake_case :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
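# A minimal sketch of the deferred-import pattern wired up above: the real
# `_LazyModule` replaces the package's entry in `sys.modules` with a proxy
# that imports a submodule only when one of its symbols is first accessed.
# This stand-in is illustrative, not transformers' actual implementation.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol):
        submodule = self._symbol_to_submodule.get(symbol)
        if submodule is None:
            raise AttributeError(symbol)
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), symbol)
        setattr(self, symbol, value)  # cache so later lookups skip __getattr__
        return value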
| 60 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str]=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : Tuple=30 , __SCREAMING_SNAKE_CASE : Dict=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Dict=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=1 / 255 , __SCREAMING_SNAKE_CASE : Dict=True , ):
'''simple docstring'''
__a = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333}
__a = parent
__a = batch_size
__a = num_channels
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_rescale
__a = rescale_factor
__a = do_pad
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]=False):
'''simple docstring'''
if not batched:
__a = image_inputs[0]
if isinstance(__SCREAMING_SNAKE_CASE , Image.Image):
__a , __a = image.size
else:
__a , __a = image.shape[1], image.shape[2]
if w < h:
__a = int(self.size['''shortest_edge'''] * h / w)
__a = self.size['''shortest_edge''']
elif w > h:
__a = self.size['''shortest_edge''']
__a = int(self.size['''shortest_edge'''] * w / h)
else:
__a = self.size['''shortest_edge''']
__a = self.size['''shortest_edge''']
else:
__a = []
for image in image_inputs:
__a , __a = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
__a = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE: item[0])[0]
__a = max(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : str = YolosImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = YolosImageProcessingTester(self)
@property
def _lowerCamelCase ( self : Any):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size'''))
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333})
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE)
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84})
self.assertEqual(image_processor.do_pad , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE)
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
__a , __a = self.image_processor_tester.get_expected_values(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
__a = self.image_processing_class(do_resize=__SCREAMING_SNAKE_CASE , do_normalize=__SCREAMING_SNAKE_CASE , do_rescale=__SCREAMING_SNAKE_CASE)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test whether the method "pad" and calling the image processor return the same tensors
__a = image_processing_a.pad(__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
__a = image_processing_a(__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1E-4))
@slow
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
__a = json.loads(f.read())
__a = {'''image_id''': 39_769, '''annotations''': target}
# encode them
__a = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''')
__a = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
# verify pixel values
__a = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
# verify area
__a = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE))
# verify boxes
__a = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3))
# verify image_id
__a = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE))
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE))
# verify class_labels
__a = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE))
# verify orig_size
__a = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE))
# verify size
__a = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE))
@slow
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
__a = json.loads(f.read())
__a = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target}
__a = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
__a = YolosImageProcessor(format='''coco_panoptic''')
__a = image_processing(images=__SCREAMING_SNAKE_CASE , annotations=__SCREAMING_SNAKE_CASE , masks_path=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
# verify pixel values
__a = torch.Size([1, 3, 800, 1_066])
self.assertEqual(encoding['''pixel_values'''].shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
# verify area
__a = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , __SCREAMING_SNAKE_CASE))
# verify boxes
__a = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , __SCREAMING_SNAKE_CASE , atol=1E-3))
# verify image_id
__a = torch.tensor([39_769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , __SCREAMING_SNAKE_CASE))
# verify is_crowd
__a = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , __SCREAMING_SNAKE_CASE))
# verify class_labels
__a = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , __SCREAMING_SNAKE_CASE))
# verify masks
__a = 822_873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , __SCREAMING_SNAKE_CASE)
# verify orig_size
__a = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , __SCREAMING_SNAKE_CASE))
# verify size
__a = torch.tensor([800, 1_066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , __SCREAMING_SNAKE_CASE))
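# A hedged sketch of the resize rule that `get_expected_values` above mirrors:
# the shorter image side is scaled to `shortest_edge` and the longer side
# follows the aspect ratio. The helper name is illustrative.
def expected_resized_size(height, width, shortest_edge=18):
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge

# e.g. a 400x300 (height x width) input resizes to (24, 18)
assert expected_resized_size(400, 300) == (24, 18)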
| 60 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : List[str] = GPTSwaTokenizer
UpperCamelCase__ : Dict = False
UpperCamelCase__ : int = True
UpperCamelCase__ : List[Any] = False
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = '''This is a test'''
__a = '''This is a test'''
return input_text, output_text
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = '''<s>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_000)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
__a = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842])
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
__a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
__a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
# fmt: off
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
__a = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
__a = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# Test that decode_fast returns the input text
for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
__a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
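# A minimal sketch of the byte-fallback behavior asserted above: a character
# that is missing from the SentencePiece vocabulary (such as the 'é' in
# "falsé") is emitted as its UTF-8 bytes, rendered as `<0xXX>` tokens. The
# helper is illustrative only.
def byte_fallback_tokens(char):
    return [f"<0x{byte:02X}>" for byte in char.encode("utf-8")]

assert byte_fallback_tokens("é") == ["<0xC3>", "<0xA9>"]
assert byte_fallback_tokens("9") == ["<0x39>"]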
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__snake_case :Optional[int] = {
'''configuration_speech_to_text''': ['''SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Speech2TextConfig'''],
'''processing_speech_to_text''': ['''Speech2TextProcessor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :int = ['''Speech2TextTokenizer''']
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = ['''Speech2TextFeatureExtractor''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :int = [
'''TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSpeech2TextForConditionalGeneration''',
'''TFSpeech2TextModel''',
'''TFSpeech2TextPreTrainedModel''',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[Any] = [
'''SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Speech2TextForConditionalGeneration''',
'''Speech2TextModel''',
'''Speech2TextPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
__snake_case :Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
from __future__ import annotations
__snake_case :Optional[Any] = []
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for i in range(len(_UpperCAmelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(_UpperCAmelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , len(_UpperCAmelCase ) ) ):
if board[i][j] == 1:
return False
return True
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if row >= len(_UpperCAmelCase ):
solution.append(_UpperCAmelCase )
printboard(_UpperCAmelCase )
print()
return True
for i in range(len(_UpperCAmelCase ) ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = 1
solve(_UpperCAmelCase , row + 1 )
__a = 0
return False
def __snake_case ( _UpperCAmelCase ):
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(_UpperCAmelCase ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
__snake_case :Optional[Any] = 8
__snake_case :Tuple = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions is:''', len(solution))
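# A hedged, compact sketch of the same backtracking idea that counts
# solutions instead of printing boards. `count_n_queens` is illustrative.
def count_n_queens(n, columns=()):
    row = len(columns)
    if row == n:
        return 1
    total = 0
    for col in range(n):
        # safe if no earlier queen shares this column or either diagonal
        if all(col != c and abs(col - c) != row - r for r, c in enumerate(columns)):
            total += count_n_queens(n, columns + (col,))
    return total

assert count_n_queens(4) == 2
assert count_n_queens(8) == 92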
| 60 | 1 |
from sklearn.metrics import matthews_corrcoef
import datasets
__snake_case :Tuple = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
__snake_case :Union[str, Any] = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
__snake_case :Optional[int] = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int32'''),
'''references''': datasets.Value('''int32'''),
}) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'''
] , )
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple=None):
'''simple docstring'''
return {
"matthews_correlation": float(matthews_corrcoef(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sample_weight=__SCREAMING_SNAKE_CASE)),
}
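# A worked sketch of the binary-case MCC formula described above:
# MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).
# `binary_mcc` is an illustrative helper, not part of the metric's API.
from math import sqrt


def binary_mcc(tp, tn, fp, fn):
    denominator = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denominator if denominator else 0.0

assert binary_mcc(tp=5, tn=5, fp=0, fn=0) == 1.0   # perfect prediction
assert binary_mcc(tp=0, tn=0, fp=5, fn=5) == -1.0  # perfectly inverted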
| 60 |
def __snake_case ( _UpperCAmelCase ):
__a = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def __snake_case ( _UpperCAmelCase ):
__a = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__a = remove_duplicates(key.upper() )
__a = len(_UpperCAmelCase )
# First fill cipher with key characters
__a = {alphabet[i]: char for i, char in enumerate(_UpperCAmelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_UpperCAmelCase ) , 26 ):
__a = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__a = alphabet[i - offset]
__a = char
return cipher_alphabet
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return "".join(cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )
def __snake_case ( ):
__a = input('''Enter message to encode or decode: ''' ).strip()
__a = input('''Enter keyword: ''' ).strip()
__a = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
__a = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
__a = create_cipher_map(_UpperCAmelCase )
print(func(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
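# A hedged usage sketch of the keyword cipher above. The snippet's helpers are
# name-obfuscated, so this re-derives a simple variant (keyword letters first,
# then the remaining alphabet) purely for illustration; the original instead
# continues the alphabet from an offset after the keyword.
def make_cipher_map(key):
    alphabet = [chr(ord("A") + i) for i in range(26)]
    seen = []
    for ch in key.upper():
        if ch.isalpha() and ch not in seen:
            seen.append(ch)
    mapped = seen + [ch for ch in alphabet if ch not in seen]
    return dict(zip(alphabet, mapped))

cipher_map = make_cipher_map("SECRET")
encoded = "".join(cipher_map.get(ch, ch) for ch in "HELLO WORLD")
decode_map = {v: k for k, v in cipher_map.items()}
assert "".join(decode_map.get(ch, ch) for ch in encoded) == "HELLO WORLD"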
| 60 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __snake_case ( ):
__a = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch helper utility that will spawn multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=_UpperCAmelCase , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=_UpperCAmelCase , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=_UpperCAmelCase )
return parser.parse_args()
def __snake_case ( ):
__a = parse_args()
# Import training_script as a module.
__a = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
__a = script_fpath.stem
__a = importlib.import_module(_UpperCAmelCase )
# Patch sys.argv
__a = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
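# Hypothetical invocation of this launcher (script name and training flags are
# made up for illustration):
#
#   python xla_spawn.py --num_cores 8 train.py --learning_rate 3e-5
#
# After parsing, sys.argv inside the training module becomes
#   ['train.py', '--learning_rate', '3e-5', '--tpu_num_cores', '8']
# and xmp.spawn forks one `_mp_fn` process per requested TPU core.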
| 60 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__snake_case :List[Any] = None
__snake_case :Dict = logging.get_logger(__name__)
__snake_case :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case :Union[str, Any] = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__snake_case :Optional[Any] = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__snake_case :Optional[int] = '''▁'''
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : str = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : Dict = BarthezTokenizer
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = vocab_file
__a = False if not self.vocab_file else True
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(__SCREAMING_SNAKE_CASE):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__a = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE)
return (out_vocab_file,)
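# A small sketch of the special-token layout that
# `build_inputs_with_special_tokens` above produces for BARThez: `<s> A </s>`
# for a single sequence and `<s> A </s></s> B </s>` for a pair. The helper is
# illustrative and works on symbolic tokens rather than ids.
def barthez_layout(a, b=None):
    tokens = ["<s>"] + a + ["</s>"]
    if b is not None:
        tokens += ["</s>"] + b + ["</s>"]
    return tokens

assert barthez_layout(["x"]) == ["<s>", "x", "</s>"]
assert barthez_layout(["x"], ["y"]) == ["<s>", "x", "</s>", "</s>", "y", "</s>"]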
| 60 | 1 |
from __future__ import annotations
def __snake_case ( _UpperCAmelCase ): # This function is recursive
__a = len(_UpperCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
__a = array[0]
__a = False
__a = 1
__a = []
while not is_found and i < array_length:
if array[i] < pivot:
__a = True
__a = [element for element in array[i:] if element >= array[i]]
__a = longest_subsequence(_UpperCAmelCase )
if len(_UpperCAmelCase ) > len(_UpperCAmelCase ):
__a = temp_array
else:
i += 1
__a = [element for element in array[1:] if element >= pivot]
__a = [pivot, *longest_subsequence(_UpperCAmelCase )]
if len(_UpperCAmelCase ) > len(_UpperCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
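# A hedged sketch of the same goal (a longest non-decreasing subsequence)
# using the standard O(n^2) dynamic program instead of the recursive pivot
# scan above. The function name is illustrative.
def longest_nondecreasing_subsequence(array):
    if not array:
        return []
    # best[i] = longest qualifying subsequence that ends at index i
    best = [[value] for value in array]
    for i in range(1, len(array)):
        for j in range(i):
            if array[j] <= array[i] and len(best[j]) + 1 > len(best[i]):
                best[i] = best[j] + [array[i]]
    return max(best, key=len)

result = longest_nondecreasing_subsequence([10, 22, 9, 33, 21, 50, 41, 60, 80])
assert len(result) == 6  # e.g. [10, 22, 33, 41, 60, 80]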
| 60 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__snake_case :Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__snake_case :Optional[int] = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def __snake_case ( _UpperCAmelCase ):
__a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0]
@deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __snake_case ( _UpperCAmelCase ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
__a = _readaa(_UpperCAmelCase )
if magic != 2051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
__a = _readaa(_UpperCAmelCase )
__a = _readaa(_UpperCAmelCase )
__a = _readaa(_UpperCAmelCase )
__a = bytestream.read(rows * cols * num_images )
__a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta )
__a = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 )
return data
@deprecated(_UpperCAmelCase , '''Please use tf.one_hot on tensors.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = labels_dense.shape[0]
__a = numpy.arange(_UpperCAmelCase ) * num_classes
__a = numpy.zeros((num_labels, num_classes) )
__a = 1
return labels_one_hot
@deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=10 ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
__a = _readaa(_UpperCAmelCase )
if magic != 2049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
__a = _readaa(_UpperCAmelCase )
__a = bytestream.read(_UpperCAmelCase )
__a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase )
return labels
class _A :
@deprecated(
__SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Any=dtypes.floataa , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=None , ):
'''simple docstring'''
__a , __a = random_seed.get_seed(__SCREAMING_SNAKE_CASE)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda)
__a = dtypes.as_dtype(__SCREAMING_SNAKE_CASE).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype)
if fake_data:
__a = 10_000
__a = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'images.shape: {images.shape} labels.shape: {labels.shape}'
__a = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__a = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2])
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__a = images.astype(numpy.floataa)
__a = numpy.multiply(__SCREAMING_SNAKE_CASE , 1.0 / 2_55.0)
__a = images
__a = labels
__a = 0
__a = 0
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return self._images
@property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return self._labels
@property
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
return self._num_examples
@property
def _lowerCamelCase ( self : str):
'''simple docstring'''
return self._epochs_completed
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True):
'''simple docstring'''
if fake_data:
__a = [1] * 784
__a = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__SCREAMING_SNAKE_CASE)],
[fake_label for _ in range(__SCREAMING_SNAKE_CASE)],
)
__a = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__a = numpy.arange(self._num_examples)
numpy.random.shuffle(__SCREAMING_SNAKE_CASE)
__a = self.images[perma]
__a = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__a = self._num_examples - start
__a = self._images[start : self._num_examples]
__a = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__a = numpy.arange(self._num_examples)
numpy.random.shuffle(__SCREAMING_SNAKE_CASE)
__a = self.images[perm]
__a = self.labels[perm]
# Start next epoch
__a = 0
__a = batch_size - rest_num_examples
__a = self._index_in_epoch
__a = self._images[start:end]
__a = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0),
)
else:
self._index_in_epoch += batch_size
__a = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_UpperCAmelCase , '''Please write your own downloading logic.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not gfile.Exists(_UpperCAmelCase ):
gfile.MakeDirs(_UpperCAmelCase )
__a = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not gfile.Exists(_UpperCAmelCase ):
urllib.request.urlretrieve(_UpperCAmelCase , _UpperCAmelCase ) # noqa: S310
with gfile.GFile(_UpperCAmelCase ) as f:
__a = f.size()
print('''Successfully downloaded''' , _UpperCAmelCase , _UpperCAmelCase , '''bytes.''' )
return filepath
@deprecated(
_UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=dtypes.floataa , _UpperCAmelCase=True , _UpperCAmelCase=5000 , _UpperCAmelCase=None , _UpperCAmelCase=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase )
__a = fake()
__a = fake()
__a = fake()
return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
if not source_url: # empty string check
__a = DEFAULT_SOURCE_URL
__a = '''train-images-idx3-ubyte.gz'''
__a = '''train-labels-idx1-ubyte.gz'''
__a = '''t10k-images-idx3-ubyte.gz'''
__a = '''t10k-labels-idx1-ubyte.gz'''
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_images(_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_images(_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase )
if not 0 <= validation_size <= len(_UpperCAmelCase ):
__a = (
'''Validation size should be between 0 and '''
f'{len(_UpperCAmelCase )}. Received: {validation_size}.'
)
raise ValueError(_UpperCAmelCase )
__a = train_images[:validation_size]
__a = train_labels[:validation_size]
__a = train_images[validation_size:]
__a = train_labels[validation_size:]
__a = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
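# A hedged sketch of the shuffling/batching behavior of `next_batch` above:
# each epoch gets its own permutation and is sliced into fixed-size batches.
# For brevity this drops the partial tail batch, whereas the class above
# stitches it to the head of the next epoch instead.
import numpy as np


def iter_batches(data, batch_size, epochs):
    for _ in range(epochs):
        order = np.random.permutation(len(data))
        for start in range(0, len(data) - batch_size + 1, batch_size):
            yield data[order[start : start + batch_size]]

sample_batches = list(iter_batches(np.arange(10), batch_size=4, epochs=1))
assert [len(b) for b in sample_batches] == [4, 4]  # tail of 2 items dropped here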
| 60 | 1 |
from __future__ import annotations
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
print(f'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(_UpperCAmelCase ):
print(f'{i}\t\t{d}' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
__a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = [float('''inf''' )] * vertex_count
__a = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_UpperCAmelCase ):
__a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__a = distance[u] + w
__a = check_negative_cycle(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case :Dict = int(input('''Enter number of vertices: ''').strip())
__snake_case :Any = int(input('''Enter number of edges: ''').strip())
__snake_case :list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
__snake_case ,__snake_case ,__snake_case :int = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
__snake_case :Any = {'''src''': src, '''dst''': dest, '''weight''': weight}
__snake_case :List[str] = int(input('''\nEnter shortest path source:''').strip())
__snake_case :Optional[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
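# A self-contained sketch of the same Bellman-Ford relaxation on a small
# hard-coded graph (edges use the same 'src'/'dst'/'weight' dict keys as the
# interactive code above).
def bellman_ford_sketch(edges, vertex_count, source):
    distance = [float("inf")] * vertex_count
    distance[source] = 0.0
    for _ in range(vertex_count - 1):
        for edge in edges:
            u, v, w = edge["src"], edge["dst"], edge["weight"]
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    # one extra pass: any further improvement implies a negative cycle
    for edge in edges:
        u, v, w = edge["src"], edge["dst"], edge["weight"]
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            raise Exception("Negative cycle found")
    return distance

sample_edges = [
    {"src": 0, "dst": 1, "weight": 4},
    {"src": 0, "dst": 2, "weight": 1},
    {"src": 2, "dst": 1, "weight": 2},
]
assert bellman_ford_sketch(sample_edges, 3, 0) == [0.0, 3.0, 1.0]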
| 60 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ):
'''simple docstring'''
__a = size if size is not None else {'''height''': 20, '''width''': 20}
__a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_center_crop
__a = crop_size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_reduce_labels
def _lowerCamelCase ( self : str):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __snake_case ( ):
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(dataset[0]['''file'''] )
__a = Image.open(dataset[1]['''file'''] )
return image, map
def __snake_case ( ):
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(ds[0]['''file'''] )
__a = Image.open(ds[1]['''file'''] )
__a = Image.open(ds[2]['''file'''] )
__a = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = BeitImageProcessingTester(self)
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std'''))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20})
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
__a = []
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
__a = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test not batched input (PIL images)
__a , __a = prepare_semantic_single_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched input (PIL images)
__a , __a = prepare_semantic_batch_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes plus an included background label, so raw labels lie in [0, 150]
__a , __a = prepare_semantic_single_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 150)
__a = True
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
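# Hedged sketch, separate from the test above: what the reduce-labels convention
# implies for ADE20k-style maps, consistent with the [0, 255] assertions
# (values below are illustrative, not taken from the fixtures).
import numpy as np
_seg = np.array([[0, 1], [2, 150]])
_reduced = _seg.astype(np.int64) - 1  # shift every class id down by one
_reduced[_reduced == -1] = 255  # the former background label 0 becomes the ignore index 255
assert _reduced.min() >= 0 and _reduced.max() <= 255  # matches the test's bounds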
| 60 | 1 |
from math import pi
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
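# Hedged check, assuming the first argument is the angle in degrees and the second
# the radius: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ~= 15.7079632679.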
| 60 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : int):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self._create_example_records()
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
for i, r in enumerate(__SCREAMING_SNAKE_CASE):
self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i])
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self._create_example_records()
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
__a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
def _lowerCamelCase ( self : int): # checks what happens with missing columns
'''simple docstring'''
__a = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
self.assertDictEqual(dset[0] , {'''col_1''': 1})
self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
def _lowerCamelCase ( self : Optional[Any]): # checks if the type can be inferred from the second record
'''simple docstring'''
__a = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = Dataset.from_list([])
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0)
self.assertListEqual(dset.column_names , [])
| 60 | 1 |
__snake_case :str = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# Return True if the sink `t` is still reachable from the source `s` in the residual graph.
__a = [False] * len(_UpperCAmelCase )
__a = [s]
__a = True
while queue:
__a = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCAmelCase )
__a = True
__a = u
return visited[t]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = [-1] * (len(_UpperCAmelCase ))
__a = 0
__a = []
__a = [i[:] for i in graph] # Record original cut, copy.
while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = float('''Inf''' )
__a = sink
while s != source:
# Find the minimum value in select path
__a = min(_UpperCAmelCase , graph[parent[s]][s] )
__a = parent[s]
max_flow += path_flow
__a = sink
while v != source:
__a = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__a = parent[v]
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
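# Hedged note: this is the classic CLRS example network; its maximum flow from 0 to 5
# is 23 (witnessed by the cut {0, 1, 2, 4} with crossing capacities 12 + 7 + 4), so the
# printed pairs are exactly the edges a maximum flow saturates in the residual graph.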
| 60 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( _UpperCAmelCase ):
__a = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __snake_case ( _UpperCAmelCase ):
__a = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def __snake_case ( ):
__a = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__a = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__a = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__a = [2, 2, 20]
__a = [3, 12, 16]
__a = [192, 768, 1024]
__a = CvtForImageClassification(_UpperCAmelCase )
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__a = image_size
__a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) )
__a = OrderedDict()
__a = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__a = list_of_state_dict + cls_token(_UpperCAmelCase )
__a = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
__a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase )
__a = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
__a = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__snake_case :str = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__snake_case :Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
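# Hedged example invocation (script name and paths are illustrative; the flags are
# the ones defined above):
#   python convert_cvt_checkpoint.py --cvt_model cvt-w24 --image_size 384 \
#       --cvt_file_name ./CvT-w24-384x384-IN-22k.pth \
#       --pytorch_dump_folder_path ./cvt-w24-converted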
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__snake_case :Optional[Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Dict = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__snake_case :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __snake_case ( _UpperCAmelCase ):
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__a = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(_UpperCAmelCase )
return 2.0 * image - 1.0
class _A ( __UpperCAmelCase ):
def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE)
@torch.no_grad()
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = 1
elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
__a = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}')
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = preprocess(__SCREAMING_SNAKE_CASE)
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters()).dtype
__a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE)
__a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(__SCREAMING_SNAKE_CASE):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1)
__a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# predict the noise residual
__a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample
__a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0)
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
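# Hedged usage sketch (component construction is illustrative and not defined here):
#   pipe = _A(vqvae=pretrained_vqvae, unet=pretrained_unet, scheduler=DDIMScheduler())
#   out = pipe(image=low_res_pil_image, num_inference_steps=100, eta=1.0)
#   upscaled = out.images[0]  # decoded through the VQ-VAE, clamped and converted to PIL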
| 60 | 1 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _A :
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=30 , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : List[Any]=32 , __SCREAMING_SNAKE_CASE : Optional[Any]=5 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Union[str, Any]="gelu" , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : Any=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = is_training
__a = use_labels
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = type_sequence_label_size
__a = initializer_range
__a = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__a = (image_size // patch_size) ** 2
__a = num_patches + 1
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : Any):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = ViTMSNModel(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = self.type_sequence_label_size
__a = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
print('''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''')
print('''Labels: {labels}''')
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
__a = 1
__a = ViTMSNForImageClassification(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : List[Any] = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
UpperCamelCase__ : Optional[int] = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ : int = False
UpperCamelCase__ : Optional[Any] = False
UpperCamelCase__ : Any = False
UpperCamelCase__ : Any = False
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = ViTMSNModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''')
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , nn.Linear))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE)
__a = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ViTMSNModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
def __snake_case ( ):
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _A ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''') if is_vision_available() else None
@slow
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
torch.manual_seed(2)
__a = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''').to(__SCREAMING_SNAKE_CASE)
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''pt''').to(__SCREAMING_SNAKE_CASE)
# forward pass
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE)
# verify the logits
__a = torch.Size((1, 1_000))
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor([-0.08_03, -0.44_54, -0.23_75]).to(__SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4))
| 60 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__snake_case :Any = TypeVar('''KT''')
__snake_case :List[str] = TypeVar('''VT''')
class _A ( Generic[KT, VT] ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : KT | str = "root" , __SCREAMING_SNAKE_CASE : VT | None = None):
'''simple docstring'''
__a = key
__a = value
__a = []
def __repr__( self : Dict):
'''simple docstring'''
return F'Node({self.key}: {self.value})'
@property
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
return len(self.forward)
class _A ( Generic[KT, VT] ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : float = 0.5 , __SCREAMING_SNAKE_CASE : int = 16):
'''simple docstring'''
__a = Node[KT, VT]()
__a = 0
__a = p
__a = max_level
def __str__( self : Union[str, Any]):
'''simple docstring'''
__a = list(self)
if len(__SCREAMING_SNAKE_CASE) == 0:
return F'SkipList(level={self.level})'
__a = max((len(str(__SCREAMING_SNAKE_CASE)) for item in items) , default=4)
__a = max(__SCREAMING_SNAKE_CASE , 4) + 4
__a = self.head
__a = []
__a = node.forward.copy()
lines.append(F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + '''* ''' * len(__SCREAMING_SNAKE_CASE))
lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE))
while len(node.forward) != 0:
__a = node.forward[0]
lines.append(
F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''')
+ ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE))
__a = node.forward
lines.append('''None'''.ljust(__SCREAMING_SNAKE_CASE) + '''* ''' * len(__SCREAMING_SNAKE_CASE))
return F'SkipList(level={self.level})\n' + "\n".join(__SCREAMING_SNAKE_CASE)
def __iter__( self : int):
'''simple docstring'''
__a = self.head
while len(node.forward) != 0:
yield node.forward[0].key
__a = node.forward[0]
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = 1
while random() < self.p and level < self.max_level:
level += 1
return level
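# Hedged note: ignoring the max_level cap, the loop above draws levels geometrically,
# P(level == k) = p ** (k - 1) * (1 - p), so the expected number of forward pointers
# per node is 1 / (1 - p), i.e. 2 with the default p = 0.5.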
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = []
__a = self.head
for i in reversed(range(self.level)):
# i < node.level - only follow level-`i` links on nodes that actually
# have them.
# node.forward[i].key < key - stop before the first node whose key is >=
# the searched key, otherwise we would skip right past it.
while i < node.level and node.forward[i].key < key:
__a = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__SCREAMING_SNAKE_CASE)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : KT):
'''simple docstring'''
__a , __a = self._locate_node(__SCREAMING_SNAKE_CASE)
if node is not None:
for i, update_node in enumerate(__SCREAMING_SNAKE_CASE):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__a = node.forward[i]
else:
__a = update_node.forward[:i]
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : KT , __SCREAMING_SNAKE_CASE : VT):
'''simple docstring'''
__a , __a = self._locate_node(__SCREAMING_SNAKE_CASE)
if node is not None:
__a = value
else:
__a = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __SCREAMING_SNAKE_CASE):
update_vector.append(self.head)
__a = level
__a = Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for i, update_node in enumerate(update_vector[:level]):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i])
if update_node.level < i + 1:
update_node.forward.append(__SCREAMING_SNAKE_CASE)
else:
__a = new_node
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : VT):
'''simple docstring'''
__a , __a = self._locate_node(__SCREAMING_SNAKE_CASE)
if node is not None:
return node.value
return None
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
__a = skip_list.head
__a = {}
while node.level != 0:
__a = node.forward[0]
__a = node.value
assert len(_UpperCAmelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
__a = skip_list.head
__a = {}
while node.level != 0:
__a = node.forward[0]
__a = node.value
if len(_UpperCAmelCase ) != 4:
print()
assert len(_UpperCAmelCase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def __snake_case ( ):
__a = SkipList()
assert skip_list.find('''Some key''' ) is None
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def __snake_case ( ):
__a = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
def traverse_keys(_UpperCAmelCase ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(_UpperCAmelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __snake_case ( ):
def is_sorted(_UpperCAmelCase ):
return all(next_item >= item for item, next_item in zip(_UpperCAmelCase , lst[1:] ) )
__a = SkipList()
for i in range(10 ):
skip_list.insert(_UpperCAmelCase , _UpperCAmelCase )
assert is_sorted(list(_UpperCAmelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(_UpperCAmelCase ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(_UpperCAmelCase ) )
def __snake_case ( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __snake_case ( ):
__a = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case :Union[str, Any] = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :int = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[Any] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[Any] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__snake_case :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
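# Hedged note: _LazyModule defers the heavy framework imports declared above, so
# importing this module stays cheap; e.g. WhisperForConditionalGeneration (and torch
# with it) is only materialized on first attribute access.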
| 60 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = 10
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = [1, 2, 3, 4]
__a = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__SCREAMING_SNAKE_CASE , self.block_size , 0) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__SCREAMING_SNAKE_CASE , self.block_size , 0) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__SCREAMING_SNAKE_CASE , self.block_size , 0) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
__a , __a = process_story(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , [])
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = ''''''
__a , __a = process_story(__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , [])
self.assertEqual(__SCREAMING_SNAKE_CASE , [])
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
__a , __a = process_story(__SCREAMING_SNAKE_CASE)
__a = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = ['''It was the best of times.''']
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = torch.tensor([1, 2, 3, 4])
__a = torch.tensor([1, 1, 1, 1])
np.testing.assert_array_equal(build_mask(__SCREAMING_SNAKE_CASE , 0).numpy() , expected.numpy())
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = torch.tensor([1, 2, 3, 4, 23, 23, 23])
__a = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(__SCREAMING_SNAKE_CASE , 23).numpy() , expected.numpy())
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = torch.tensor([8, 2, 3, 4, 1, 1, 1])
__a = torch.tensor([1, 1, 1, 1, 0, 0, 0])
np.testing.assert_array_equal(build_mask(__SCREAMING_SNAKE_CASE , 1).numpy() , expected.numpy())
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = 101
__a = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
__a = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
__a = compute_token_type_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
np.testing.assert_array_equal(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
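# Hedged reference sketch of the truncate-or-pad semantics the tests above encode
# (inferred from the expected values, not copied from utils_summarization):
def _truncate_or_pad_sketch(sequence, block_size, pad_token_id):
    if len(sequence) >= block_size:
        return sequence[:block_size]  # hard truncation to the block size
    return sequence + [pad_token_id] * (block_size - len(sequence))  # right padding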
| 60 |
from __future__ import annotations
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
print(f'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(_UpperCAmelCase ):
print(f'{i}\t\t{d}' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
__a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = [float('''inf''' )] * vertex_count
__a = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_UpperCAmelCase ):
__a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__a = distance[u] + w
__a = check_negative_cycle(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
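# Hedged note on the bound: after round k of the outer loop every shortest path that
# uses at most k edges is final, and without negative cycles a shortest path has at
# most vertex_count - 1 edges - hence the vertex_count - 1 relaxation rounds, with
# check_negative_cycle doing one extra scan for any edge that could still relax.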
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case :Dict = int(input('''Enter number of vertices: ''').strip())
__snake_case :Any = int(input('''Enter number of edges: ''').strip())
__snake_case :list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
__snake_case ,__snake_case ,__snake_case = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
__snake_case :Any = {'''src''': src, '''dst''': dest, '''weight''': weight}
__snake_case :List[str] = int(input('''\nEnter shortest path source:''').strip())
__snake_case :Optional[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 60 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = tempfile.mkdtemp()
__a = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))
__a = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
__a = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , **__SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any] , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str] , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)]
__a = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1)) for x in image_inputs]
return image_inputs
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = self.get_image_processor()
__a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
processor_slow.save_pretrained(self.tmpdirname)
__a = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE)
__a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
processor_fast.save_pretrained(self.tmpdirname)
__a = AlignProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE)
self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE)
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE)
self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
__a = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
__a = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0)
__a = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = self.prepare_image_inputs()
__a = image_processor(__SCREAMING_SNAKE_CASE , return_tensors='''np''')
__a = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='''np''')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = '''lower newer'''
__a = processor(text=__SCREAMING_SNAKE_CASE)
__a = tokenizer(__SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=64)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = '''lower newer'''
__a = self.prepare_image_inputs()
__a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE)
self.assertListEqual(list(inputs.keys()) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''])
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE):
processor()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__a = processor.batch_decode(__SCREAMING_SNAKE_CASE)
__a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.get_image_processor()
__a = self.get_tokenizer()
__a = AlignProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE)
__a = '''lower newer'''
__a = self.prepare_image_inputs()
__a = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
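# Hedged end-to-end sketch of the processor contract these tests exercise
# (inputs are illustrative):
#   processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)
#   batch = processor(text="a photo of a cat", images=pil_image, return_tensors="pt")
#   # batch keys: input_ids, token_type_ids, attention_mask, pixel_values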
| 60 |
import os
import sys
import unittest
__snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''')
__snake_case :Any = '''
{0} = None
'''
__snake_case :Dict = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
__snake_case :str = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
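# Hedged, self-contained illustration of what the class template above expands to for
# a FakeClass gated on the 'torch' backend (the stubs below are illustrative, not the
# real transformers DummyObject / requires_backends):
def _requires_backends_stub(obj, backends):
    raise ImportError(f"{type(obj).__name__} requires the {backends} backend(s).")
class _DummyObjectStub(type):
    pass  # the real metaclass also intercepts class-level attribute access
class _FakeClassIllustration(metaclass=_DummyObjectStub):
    _backends = "torch"
    def __init__(self, *args, **kwargs):
        _requires_backends_stub(self, self._backends)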
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(__SCREAMING_SNAKE_CASE)
__a = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''')
__a = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''')
__a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''')
__a = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''')
__a = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''')
__a = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
__SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
__a = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__a = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
        dummy_files = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
        self.assertEqual(dummy_files['''torch'''] , expected_dummy_pytorch_file)
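# One more worked call of find_backend, mirroring the assertions above
# (the probe line is made up for illustration):
assert find_backend(''' if not is_torch_available():''') == '''torch'''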
| 60 | 1 |
def bubble_sort( list_data , length = 0 ):
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
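# Example run of the recursive bubble sort above (the sample list is made up):
assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]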
| 60 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__snake_case :str = get_logger()
__snake_case :Optional[dict] = None
class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
    def __init__( self , features=None , device=None , **jnp_array_kwargs):
        '''simple docstring'''
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device):
            raise ValueError(
                F'Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` '
                '''is not serializable with either `pickle` or `dill`. Instead you can surround '''
                '''the device with `str()` to get its string identifier that will be internally mapped '''
                '''to the actual `jaxlib.xla_extension.Device`.''')
        self.device = device if isinstance(device , str) else str(jax.devices()[0])
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable with
        # either `pickle` or `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                F'Device with string identifier {self.device} not listed among the available '
                F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
                F'device: {str(jax.devices()[0])}.')
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        '''simple docstring'''
        import jax
        return {str(device): device for device in jax.devices()}
    def _consolidate( self , column):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(column , list) and column:
            if all(
                isinstance(x , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
                return jnp.stack(column , axis=0)
        return column
    def _tensorize( self , value):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None))):
            return value
        elif isinstance(value , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'''dtype''': jnp.int64}
            else:
                default_dtype = {'''dtype''': jnp.int32}
        elif isinstance(value , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
            default_dtype = {'''dtype''': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image):
                value = np.asarray(value)
        # using a global variable since `jaxlib.xla_extension.Device` is not serializable with
        # either `pickle` or `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize( self , data_struct):
        '''simple docstring'''
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct , '''__array__''') and not isinstance(data_struct , jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct , (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize( self , data_struct: dict):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False)
    def format_row( self , pa_table: pa.Table):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)
    def format_column( self , pa_table: pa.Table):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column
    def format_batch( self , pa_table: pa.Table):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
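# A standalone sketch of the integer-precision rule in `_tensorize` above,
# kept commented to preserve this module's deliberately lazy jax imports
# (illustrative only):
# import jax, jax.numpy as jnp, numpy as np
# dtype = jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
# print(jnp.array(np.arange(3), dtype=dtype).dtype)  # int32 unless x64 mode is on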
| 60 | 1 |
import warnings
from ..trainer import Trainer
from ..utils import logging
__snake_case :Dict = logging.get_logger(__name__)
class _A ( __UpperCAmelCase ):
    def __init__( self , args=None , **kwargs):
        '''simple docstring'''
        warnings.warn(
            '''`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` '''
            '''instead.''' , FutureWarning , )
        super().__init__(args=args , **kwargs)
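# The same deprecation-shim pattern in miniature (class names made up):
class _NewAPI:
    def __init__( self , args=None , **kwargs):
        self.args = args
class _OldAPI(_NewAPI):
    def __init__( self , args=None , **kwargs):
        warnings.warn('''`_OldAPI` is deprecated; use `_NewAPI` instead.''' , FutureWarning)
        super().__init__(args=args , **kwargs)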
| 60 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :Tuple = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
    )
    parser.add_argument(
        '''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
    )
    parser.add_argument(
        '''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
    )
    parser.add_argument('''--vocab_size''', default=3_0522, type=int)
    args = parser.parse_args()
    logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file, '''rb''') as fp:
        data = pickle.load(fp)
    logger.info('''Counting occurrences for MLM.''')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
    logger.info(f'Dump to {args.token_counts_dump}')
    with open(args.token_counts_dump, '''wb''') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
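# A tiny in-memory dry-run of the counting loop above (token ids made up):
_demo = Counter()
for _ids in [[5, 7, 5], [7, 9]]:
    _demo.update(_ids)
assert _demo[5] == 2 and _demo[7] == 2 and _demo[9] == 1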
| 60 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'''169M''': 12,
'''430M''': 24,
'''1B5''': 24,
'''3B''': 32,
'''7B''': 32,
'''14B''': 40,
}
HIDDEN_SIZE_MAPPING = {
'''169M''': 768,
'''430M''': 1024,
'''1B5''': 2048,
'''3B''': 2560,
'''7B''': 4096,
'''14B''': 5120,
}
def convert_state_dict( state_dict ):
    state_dict_keys = list(state_dict.keys() )
    for name in state_dict_keys:
        weight = state_dict.pop(name )
        # emb -> embedding
        if name.startswith('''emb.''' ):
            name = name.replace('''emb.''' , '''embeddings.''' )
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith('''blocks.0.ln0''' ):
            name = name.replace('''blocks.0.ln0''' , '''blocks.0.pre_ln''' )
        # att -> attention
        name = re.sub(R'''blocks\.(\d+)\.att''' , R'''blocks.\1.attention''' , name )
        # ffn -> feed_forward
        name = re.sub(R'''blocks\.(\d+)\.ffn''' , R'''blocks.\1.feed_forward''' , name )
        # time_mix_k -> time_mix_key and reshape
        if name.endswith('''.time_mix_k''' ):
            name = name.replace('''.time_mix_k''' , '''.time_mix_key''' )
        # time_mix_v -> time_mix_value and reshape
        if name.endswith('''.time_mix_v''' ):
            name = name.replace('''.time_mix_v''' , '''.time_mix_value''' )
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith('''.time_mix_r''' ):
            name = name.replace('''.time_mix_r''' , '''.time_mix_receptance''' )
        if name != "head.weight":
            name = '''rwkv.''' + name
        state_dict[name] = weight
    return state_dict
def convert_rwkv_checkpoint_to_hf_format( repo_id , checkpoint_file , output_dir , size=None , tokenizer_file=None , push_to_hub=False , model_name=None ):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print('''No `--tokenizer_file` provided, we will use the default tokenizer.''' )
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/gpt-neox-20b''' )
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file )
        vocab_size = len(tokenizer )
    tokenizer.save_pretrained(output_dir )
    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError('''Could not infer the size, please provide it with the `--size` argument.''' )
    if size not in possible_sizes:
        raise ValueError(f'`size` should be one of {possible_sizes}, got {size}.' )
    config = RwkvConfig(
        vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
    config.save_pretrained(output_dir )
    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id , checkpoint_file )
    state_dict = torch.load(model_file , map_location='''cpu''' )
    state_dict = convert_state_dict(state_dict )
    # 4. Split in shards and save
    shards , index = shard_checkpoint(state_dict )
    for shard_file, shard in shards.items():
        torch.save(shard , os.path.join(output_dir , shard_file ) )
    if index is not None:
        save_index_file = os.path.join(output_dir , WEIGHTS_INDEX_NAME )
        # Save the index as well
        with open(save_index_file , '''w''' , encoding='''utf-8''' ) as f:
            content = json.dumps(index , indent=2 , sort_keys=True ) + '''\n'''
            f.write(content )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        '''Cleaning up shards. This may error with an OOM error; if this is the case don\'t worry, you still have converted the model.''' )
    shard_files = list(shards.keys() )
    del state_dict
    del shards
    gc.collect()
    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir , shard_file ) )
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(output_dir , shard_file ) )
    del state_dict
    gc.collect()
    if push_to_hub:
        if model_name is None:
            raise ValueError('''Please provide a `model_name` to push the model to the Hub.''' )
        model = AutoModelForCausalLM.from_pretrained(output_dir )
        model.push_to_hub(model_name , max_shard_size='''2GB''' )
        tokenizer.push_to_hub(model_name )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
    )
    parser.add_argument(
        '''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
    )
    parser.add_argument(
        '''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
    )
    parser.add_argument(
        '''--tokenizer_file''',
        default=None,
        type=str,
        help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
    )
    parser.add_argument(
        '''--size''',
        default=None,
        type=str,
        help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Push to the Hub the converted model.''',
    )
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help='''Name of the pushed model on the Hub, including the username / organization.''',
    )
    args = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
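# A tiny dry-run of `convert_state_dict` above on made-up keys (no tensors,
# no checkpoint download involved):
_fake = {"emb.weight": 0, "blocks.0.att.time_mix_k": 0, "head.weight": 0}
assert sorted(convert_state_dict(_fake)) == [
    "head.weight",
    "rwkv.blocks.0.attention.time_mix_key",
    "rwkv.embeddings.weight",
]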
| 60 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__snake_case :List[str] = HfApi()
__snake_case :str = {}
# fmt: off
__snake_case :Optional[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__snake_case :Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__snake_case :str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__snake_case :List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__snake_case :Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__snake_case :List[str] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__snake_case :Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__snake_case :Tuple = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__snake_case :List[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__snake_case :Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__snake_case :List[str] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__snake_case :Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__snake_case :List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__snake_case :Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
__snake_case :List[Any] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
__snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__snake_case :str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__snake_case :List[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__snake_case :Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
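# A minimal sketch of the closeness check used above, on synthetic tensors
# (values made up; no model download involved):
_expected = torch.tensor([0.1, -0.2, 0.3])
assert torch.allclose(_expected + 1E-4, _expected, atol=1E-3)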
| 60 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fast27_timesteps,
smart27_timesteps,
smart50_timesteps,
smart100_timesteps,
smart185_timesteps,
super27_timesteps,
super40_timesteps,
super100_timesteps,
)
@dataclass
class _A ( __UpperCAmelCase ):
    images : Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected : Optional[List[bool]]
    watermark_detected : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
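# The availability-guard pattern above in miniature (a simplified sketch;
# the module name is made up):
try:
    import _some_optional_backend  # stand-in for the real optional dependency
except ImportError:
    _HAS_BACKEND = False
else:
    _HAS_BACKEND = True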
| 60 |
from collections.abc import Generator
from math import sin
def to_little_endian( string_32 ):
    if len(string_32 ) != 32:
        raise ValueError('''Input must be of length 32''' )
    little_endian = b''''''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex( i ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    hex_rep = format(i , '''08x''' )[-8:]
    little_endian_hex = b''''''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
    return little_endian_hex
def preprocess( message ):
    bit_string = b''''''
    for char in message:
        bit_string += format(char , '''08b''' ).encode('''utf-8''' )
    start_len = format(len(bit_string ) , '''064b''' ).encode('''utf-8''' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string
def get_block_words( bit_string ):
    if len(bit_string ) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''' )
    for pos in range(0 , len(bit_string ) , 512 ):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0 , 512 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words
def not_32( i ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    i_str = format(i , '''032b''' )
    new_str = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )
def sum_32( a , b ):
    return (a + b) % 2**32
def left_rotate_32( i , shift ):
    if i < 0:
        raise ValueError('''Input must be non-negative''' )
    if shift < 0:
        raise ValueError('''Shift must be non-negative''' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me( message ):
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    a0 = 0X67_452_301
    b0 = 0Xef_cda_b89
    c0 = 0X98_bad_cfe
    d0 = 0X10_325_476
    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = a0
        b = b0
        c = c0
        d = d0
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b , left_rotate_32(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        a0 = sum_32(a0 , a )
        b0 = sum_32(b0 , b )
        c0 = sum_32(c0 , c )
        d0 = sum_32(d0 , d )
    digest = reformat_hex(a0 ) + reformat_hex(b0 ) + reformat_hex(c0 ) + reformat_hex(d0 )
    return digest
if __name__ == "__main__":
    import doctest
    doctest.testmod()
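# Quick cross-checks against the standard library, assuming the reconstruction
# above is faithful to the reference implementation (hex digest as bytes):
import hashlib
assert md5_me(b"") == hashlib.md5(b"").hexdigest().encode('''utf-8''')
assert md5_me(b"hello") == hashlib.md5(b"hello").hexdigest().encode('''utf-8''')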
| 60 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]
class Graph :
    def __init__( self , vertices: set[int] , edges: Mapping[EdgeT, int]):
        '''simple docstring'''
        self.vertices = vertices
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }
    def add_edge( self , edge: EdgeT , weight: int):
        '''simple docstring'''
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight
    def prims_algorithm( self ):
        '''simple docstring'''
        subgraph = Graph({min(self.vertices)} , {})
        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int
        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight)
        return subgraph
def __snake_case ( _UpperCAmelCase = "p107_network.txt" ):
__a = os.path.abspath(os.path.dirname(_UpperCAmelCase ) )
__a = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
__a = {}
__a = 42
__a = 42
__a = 42
with open(_UpperCAmelCase ) as f:
__a = f.read().strip().split('''\n''' )
__a = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(_UpperCAmelCase ) ):
for edgea in range(_UpperCAmelCase ):
if adjaceny_matrix[edgea][edgea] != "-":
__a = int(adjaceny_matrix[edgea][edgea] )
__a = Graph(set(range(len(_UpperCAmelCase ) ) ) , _UpperCAmelCase )
__a = graph.prims_algorithm()
__a = sum(graph.edges.values() )
__a = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(f'{solution() = }')
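# A tiny in-module check of the Prim implementation above (the 3-vertex
# graph is made up):
_g = Graph({1, 2, 3} , {(1, 2): 1, (2, 3): 2, (1, 3): 10})
assert sum(_g.prims_algorithm().edges.values()) == 3  # MST keeps (1,2) and (2,3)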
| 60 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
ZERO2 = '''zero2'''
ZERO3 = '''zero3'''
stages = [ZERO2, ZERO3]
def custom_name_func( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _A ( __UpperCAmelCase ):
@parameterized.expand(params , name_func=custom_name_func)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(params , name_func=custom_name_func)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@parameterized.expand(params , name_func=custom_name_func)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(params , name_func=custom_name_func)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = models[model]
__a = self.run_trainer(
stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
self.do_checks(__SCREAMING_SNAKE_CASE)
return output_dir
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE)
__a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__a = self.get_launcher(__SCREAMING_SNAKE_CASE)
__a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env())
return output_dir
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False):
'''simple docstring'''
__a = min(2 , get_gpu_count()) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
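# A standalone illustration of the sub-test naming rule above (names made up):
class _FakeParam:
    args = ('''zero2''', '''base''')
def _fake_test(): ...
assert custom_name_func(_fake_test , 0 , _FakeParam()) == '''_fake_test_zero2_base'''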
| 60 | 1 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case :Any = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__snake_case :Tuple = 5_0003
__snake_case :int = 5_0002
@require_sentencepiece
@require_tokenizers
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : List[str] = PLBartTokenizer
UpperCamelCase__ : Tuple = None
UpperCamelCase__ : List[Any] = False
def _lowerCamelCase ( self : str):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a = PLBartTokenizer(__SCREAMING_SNAKE_CASE , language_codes='''base''' , keep_accents=__SCREAMING_SNAKE_CASE)
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = PLBartTokenizer(__SCREAMING_SNAKE_CASE , language_codes='''base''' , keep_accents=__SCREAMING_SNAKE_CASE)
__a = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__a = tokenizer.vocab_size
__a = [tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE) for x in range(end - 4 , __SCREAMING_SNAKE_CASE)]
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''])
__a = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__a = tokenizer(__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(
tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = PLBartTokenizer(__SCREAMING_SNAKE_CASE , language_codes='''multi''' , keep_accents=__SCREAMING_SNAKE_CASE)
__a = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__a = tokenizer.vocab_size
__a = [tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE) for x in range(end - 7 , __SCREAMING_SNAKE_CASE)]
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''])
__a = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__a = tokenizer(__SCREAMING_SNAKE_CASE).input_ids
self.assertEqual(
tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] = '''uclanlp/plbart-python-en_XX'''
UpperCamelCase__ : Optional[int] = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
UpperCamelCase__ : Any = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
UpperCamelCase__ : Any = [
134,
5_452,
33_460,
33_441,
33_463,
33_465,
33_463,
33_449,
988,
20,
33_456,
19,
33_456,
771,
39,
4_258,
889,
3_318,
33_441,
33_463,
33_465,
33_463,
33_449,
2_471,
2,
PYTHON_CODE,
]
@classmethod
def _lowerCamelCase ( cls : str):
'''simple docstring'''
__a = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''')
__a = 1
return cls
def _lowerCamelCase ( self : int):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
self.assertIn(__SCREAMING_SNAKE_CASE , self.tokenizer.all_special_ids)
__a = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
__a = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE)
__a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertNotIn(self.tokenizer.eos_token , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , __SCREAMING_SNAKE_CASE)
__a = 10
__a = self.tokenizer(__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , __SCREAMING_SNAKE_CASE)
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__''']) , [50_004, 50_001])
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = tempfile.mkdtemp()
__a = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE)
__a = PLBartTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __SCREAMING_SNAKE_CASE)
@require_torch
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , return_tensors='''pt''')
__a = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE])
self.assertEqual(batch.decoder_input_ids[1][0] , __SCREAMING_SNAKE_CASE)
self.assertEqual(batch.decoder_input_ids[1][-1] , 2)
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE])
@require_torch
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=len(self.expected_src_tokens) , return_tensors='''pt''' , )
__a = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id)
self.assertIsInstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual((2, 26) , batch.input_ids.shape)
self.assertEqual((2, 26) , batch.attention_mask.shape)
__a = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __SCREAMING_SNAKE_CASE)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE])
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.tokenizer(self.src_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=3 , return_tensors='''pt''')
__a = self.tokenizer(
text_target=self.tgt_text , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=10 , return_tensors='''pt''')
__a = targets['''input_ids''']
__a = shift_tokens_right(__SCREAMING_SNAKE_CASE , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''')
self.assertEqual(
nested_simplify(__SCREAMING_SNAKE_CASE) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50_003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50_001,
} , )
| 60 |
def __snake_case ( input_str , use_pascal = False ):
    if not isinstance(input_str , str ):
        msg = f'Expected string as input, found {type(input_str )}'
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f'Expected boolean as use_pascal parameter, found {type(use_pascal )}'
        raise ValueError(msg )
    words = input_str.split('''_''' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '''''' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
    from doctest import testmod
    testmod()
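# Example calls, assuming the converter above is in scope as `__snake_case`:
assert __snake_case('''some_random_string''') == '''someRandomString'''
assert __snake_case('''some_random_string''' , use_pascal=True) == '''SomeRandomString'''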
| 60 | 1 |
import argparse
CUSTOM_JS_FILE = '''docs/source/_static/js/custom.js'''
def update_custom_js( version ):
    with open(CUSTOM_JS_FILE , encoding='''utf-8''' , newline='''\n''' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('''const stableVersion =''' ):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith('''const versionMapping = {''' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('''}''' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    with open(CUSTOM_JS_FILE , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--version''', help='''Release version.''')
    args = parser.parse_args()
    update_custom_js(args.version)
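# A dry-run sketch of the same edit on an in-memory list of lines (the JS
# fixture is made up; no file is touched):
_js = ['const stableVersion = "v4.0.0"\n', "const versionMapping = {\n", "}\n"]
_js[0] = 'const stableVersion = "v4.1.0"\n'
_js[1] += '    "v4.1.0": "v4.1.0",\n'
print("".join(_js))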
| 60 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class Version :
    version_str : str
    description : Optional[str] = None
    major : Optional[Union[str, int]] = None
    minor : Optional[Union[str, int]] = None
    patch : Optional[Union[str, int]] = None
    def __post_init__( self ):
        '''simple docstring'''
        self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str)
    def __repr__( self ):
        '''simple docstring'''
        return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
    @property
    def tuple( self ):
        '''simple docstring'''
        return self.major, self.minor, self.patch
    def _validate_operand( self , other ):
        '''simple docstring'''
        if isinstance(other , str):
            return Version(other)
        elif isinstance(other , Version):
            return other
        raise TypeError(F'{other} (type {type(other)}) cannot be compared to version.')
    def __eq__( self , other ):
        '''simple docstring'''
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__( self , other ):
        '''simple docstring'''
        other = self._validate_operand(other)
        return self.tuple < other.tuple
    def __hash__( self ):
        '''simple docstring'''
        return hash(_version_tuple_to_str(self.tuple))
    @classmethod
    def from_dict( cls , dic ):
        '''simple docstring'''
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})
    def _to_yaml_string( self ):
        '''simple docstring'''
        return self.version_str
def _str_to_version_tuple( version_str ):
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
    return tuple(int(v ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def _version_tuple_to_str( version_tuple ):
    return ".".join(str(v ) for v in version_tuple )
| 60 | 1 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case :Optional[Any] = logging.get_logger(__name__)
__snake_case :Any = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[str] = '''owlvit_text_model'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : Dict=49_408 , __SCREAMING_SNAKE_CASE : Optional[int]=512 , __SCREAMING_SNAKE_CASE : str=2_048 , __SCREAMING_SNAKE_CASE : int=12 , __SCREAMING_SNAKE_CASE : List[str]=8 , __SCREAMING_SNAKE_CASE : Dict=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]="quick_gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=1E-5 , __SCREAMING_SNAKE_CASE : List[str]=0.0 , __SCREAMING_SNAKE_CASE : str=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=1.0 , __SCREAMING_SNAKE_CASE : Dict=0 , __SCREAMING_SNAKE_CASE : Tuple=49_406 , __SCREAMING_SNAKE_CASE : List[Any]=49_407 , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
__a = vocab_size
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = max_position_embeddings
__a = hidden_act
__a = layer_norm_eps
__a = attention_dropout
__a = initializer_range
__a = initializer_factor
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''') == "owlvit":
__a = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict , **kwargs)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[Any] = '''owlvit_vision_model'''
def __init__( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=768 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3_072 , __SCREAMING_SNAKE_CASE : List[Any]=12 , __SCREAMING_SNAKE_CASE : Any=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=3 , __SCREAMING_SNAKE_CASE : str=768 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]="quick_gelu" , __SCREAMING_SNAKE_CASE : List[str]=1E-5 , __SCREAMING_SNAKE_CASE : str=0.0 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : Optional[int]=1.0 , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = num_channels
__a = image_size
__a = patch_size
__a = hidden_act
__a = layer_norm_eps
__a = attention_dropout
__a = initializer_range
__a = initializer_factor
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''') == "owlvit":
__a = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict , **kwargs)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[Any] = '''owlvit'''
UpperCamelCase__ : Dict = True
def __init__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Dict=512 , __SCREAMING_SNAKE_CASE : str=2.65_92 , __SCREAMING_SNAKE_CASE : Dict=True , **__SCREAMING_SNAKE_CASE : Optional[Any] , ):
'''simple docstring'''
super().__init__(**__SCREAMING_SNAKE_CASE)
if text_config is None:
__a = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''')
if vision_config is None:
__a = {}
logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''')
__a = OwlViTTextConfig(**__SCREAMING_SNAKE_CASE)
__a = OwlViTVisionConfig(**__SCREAMING_SNAKE_CASE)
__a = projection_dim
__a = logit_scale_init_value
__a = return_dict
__a = 1.0
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs):
        '''simple docstring'''
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
if "model_type" in config_dict and hasattr(cls , '''model_type''') and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict , **kwargs)
@classmethod
    def from_text_vision_configs( cls , text_config: Dict , vision_config: Dict , **kwargs):
        '''simple docstring'''
        config_dict = {}
        config_dict['''text_config'''] = text_config
        config_dict['''vision_config'''] = vision_config
        return cls.from_dict(config_dict , **kwargs)
    def to_dict( self ):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output['''text_config'''] = self.text_config.to_dict()
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
class _A ( __UpperCAmelCase ):
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
])
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
])
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return 1E-4
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : "ProcessorMixin" , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : int = -1 , __SCREAMING_SNAKE_CASE : Optional["TensorType"] = None , ):
'''simple docstring'''
text_input_dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=__SCREAMING_SNAKE_CASE , seq_length=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE)
image_input_dict = super().generate_dummy_inputs(
processor.image_processor , batch_size=__SCREAMING_SNAKE_CASE , framework=__SCREAMING_SNAKE_CASE)
return {**text_input_dict, **image_input_dict}
@property
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return 14
| 60 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__snake_case :int = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class _A ( tr.AbstractTransform ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "):
'''simple docstring'''
__a = sentence_delimiter
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return list(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = []
for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE):
chars.extend(self.process_string(__SCREAMING_SNAKE_CASE))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1:
chars.append(self.sentence_delimiter)
return chars
__snake_case :Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__snake_case :Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__snake_case :Optional[int] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__snake_case :Tuple = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
'''
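# A minimal worked example of the formula above (illustrative, not from the source):
# reference "abcde" vs. hypothesis "abXde" gives S=1, D=0, I=0 and N=5,
# so CER = (1 + 0 + 0) / 5 = 0.2.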
__snake_case :Tuple = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcriptions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False):
'''simple docstring'''
if concatenate_texts:
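            # With the character-level transforms above, jiwer's "wer" field is effectively the CER.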
return jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"]
__a = 0
__a = 0
for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
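        # Corpus-level CER: total character errors divided by total reference characters.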
return incorrect / total
| 60 | 1 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
__a , __a = get_aligned_output_features_output_indices(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , ['''c'''])
self.assertEqual(__SCREAMING_SNAKE_CASE , [2])
# Out indices set to match out features
__a , __a = get_aligned_output_features_output_indices(['''a''', '''c'''] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , ['''a''', '''c'''])
self.assertEqual(__SCREAMING_SNAKE_CASE , [0, 2])
# Out features set to match out indices
__a , __a = get_aligned_output_features_output_indices(__SCREAMING_SNAKE_CASE , [0, 2] , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , ['''a''', '''c'''])
self.assertEqual(__SCREAMING_SNAKE_CASE , [0, 2])
# Out features selected from negative indices
__a , __a = get_aligned_output_features_output_indices(__SCREAMING_SNAKE_CASE , [-3, -1] , __SCREAMING_SNAKE_CASE)
self.assertEqual(__SCREAMING_SNAKE_CASE , ['''a''', '''c'''])
self.assertEqual(__SCREAMING_SNAKE_CASE , [-3, -1])
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
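        # Stage names must be specified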
with self.assertRaises(__SCREAMING_SNAKE_CASE):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , __SCREAMING_SNAKE_CASE)
# Out features must be a list
with self.assertRaises(__SCREAMING_SNAKE_CASE):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''])
# Out features must be a subset of stage names
with self.assertRaises(__SCREAMING_SNAKE_CASE):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''])
# Out indices must be a list or tuple
with self.assertRaises(__SCREAMING_SNAKE_CASE):
verify_out_features_out_indices(__SCREAMING_SNAKE_CASE , 0 , ['''a''', '''b'''])
# Out indices must be a subset of stage names
with self.assertRaises(__SCREAMING_SNAKE_CASE):
verify_out_features_out_indices(__SCREAMING_SNAKE_CASE , (0, 1) , ['''a'''])
# Out features and out indices must be the same length
with self.assertRaises(__SCREAMING_SNAKE_CASE):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''])
# Out features should match out indices
with self.assertRaises(__SCREAMING_SNAKE_CASE):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''])
# Out features and out indices should be in order
with self.assertRaises(__SCREAMING_SNAKE_CASE):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''])
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''])
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = BackboneMixin()
__a = ['''a''', '''b''', '''c''']
__a = ['''a''', '''c''']
__a = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''])
self.assertEqual(backbone.out_indices , [0, 2])
# Check out features and indices are updated correctly
__a = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''])
self.assertEqual(backbone.out_indices , [0, 1])
__a = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''])
self.assertEqual(backbone.out_indices , [-3, -1])
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case :Union[str, Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = ['''ViTFeatureExtractor''']
__snake_case :Optional[Any] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :str = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__snake_case :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = inspect.getfile(accelerate.test_utils)
__a = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_script.py'''])
__a = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])
@require_tpu
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = F'\n {self.test_dir}/xla_spawn.py\n --num_cores 8\n {self.test_file_path}\n '.split()
__a = [sys.executable] + distributed_args
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy())
| 60 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : List[str] = GPTSwaTokenizer
UpperCamelCase__ : Dict = False
UpperCamelCase__ : int = True
UpperCamelCase__ : List[Any] = False
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = '''This is a test'''
__a = '''This is a test'''
return input_text, output_text
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = '''<s>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_000)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
__a = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842])
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
__a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
__a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
# fmt: off
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
__a = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
__a = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# Test that decode_fast returns the input text
for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
__a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
| 60 | 1 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
__snake_case :Dict = logging.get_logger(__name__)
class _A :
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = question_encoder
__a = generator
__a = self.question_encoder
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
if os.path.isfile(__SCREAMING_SNAKE_CASE):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file')
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE)
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''question_encoder_tokenizer''')
__a = os.path.join(__SCREAMING_SNAKE_CASE , '''generator_tokenizer''')
self.question_encoder.save_pretrained(__SCREAMING_SNAKE_CASE)
self.generator.save_pretrained(__SCREAMING_SNAKE_CASE)
@classmethod
def _lowerCamelCase ( cls : Optional[Any] , __SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
__a = kwargs.pop('''config''' , __SCREAMING_SNAKE_CASE)
if config is None:
__a = RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE)
__a = AutoTokenizer.from_pretrained(
__SCREAMING_SNAKE_CASE , config=config.question_encoder , subfolder='''question_encoder_tokenizer''')
__a = AutoTokenizer.from_pretrained(
__SCREAMING_SNAKE_CASE , config=config.generator , subfolder='''generator_tokenizer''')
return cls(question_encoder=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE)
def __call__( self : Tuple , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
return self.current_tokenizer(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , *__SCREAMING_SNAKE_CASE : Union[str, Any] , **__SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
return self.generator.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
return self.generator.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self.question_encoder
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.generator
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[List[str]] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : str = "longest" , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = True , **__SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
warnings.warn(
'''`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '''
        '''regular `__call__` method to prepare your inputs and the tokenizer under the `as_target_tokenizer` '''

'''context manager to prepare your targets. See the documentation of your specific tokenizer for more '''
'''details''' , __SCREAMING_SNAKE_CASE , )
if max_length is None:
__a = self.current_tokenizer.model_max_length
__a = self(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
__a = self.current_tokenizer.model_max_length
__a = self(
text_target=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = labels['''input_ids''']
return model_inputs
| 60 |
from __future__ import annotations
__snake_case :Optional[Any] = []
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
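    # No queen may already occupy this row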
for i in range(len(_UpperCAmelCase ) ):
if board[row][i] == 1:
return False
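    # No queen may already occupy this column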
for i in range(len(_UpperCAmelCase ) ):
if board[i][column] == 1:
return False
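    # No queen on the upper-left diagonal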
for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
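    # No queen on the upper-right diagonal (rows below are still empty)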
for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , len(_UpperCAmelCase ) ) ):
if board[i][j] == 1:
return False
return True
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if row >= len(_UpperCAmelCase ):
solution.append(_UpperCAmelCase )
printboard(_UpperCAmelCase )
print()
return True
for i in range(len(_UpperCAmelCase ) ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = 1
solve(_UpperCAmelCase , row + 1 )
__a = 0
return False
def __snake_case ( _UpperCAmelCase ):
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(_UpperCAmelCase ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
__snake_case :Optional[Any] = 8
__snake_case :Tuple = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions is:''', len(solution))
| 60 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case :str = {
'''configuration_xmod''': [
'''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XmodConfig''',
'''XmodOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Union[str, Any] = [
'''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XmodForCausalLM''',
'''XmodForMaskedLM''',
'''XmodForMultipleChoice''',
'''XmodForQuestionAnswering''',
'''XmodForSequenceClassification''',
'''XmodForTokenClassification''',
'''XmodModel''',
'''XmodPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
__snake_case :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
def __snake_case ( _UpperCAmelCase ):
__a = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def __snake_case ( _UpperCAmelCase ):
__a = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__a = remove_duplicates(key.upper() )
__a = len(_UpperCAmelCase )
# First fill cipher with key characters
__a = {alphabet[i]: char for i, char in enumerate(_UpperCAmelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_UpperCAmelCase ) , 26 ):
__a = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__a = alphabet[i - offset]
__a = char
return cipher_alphabet
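# Illustrative example (not from the source): with key "CIPHER" the map sends
# A->C, B->I, C->P, D->H, E->E, F->R, and every remaining plaintext letter maps,
# in order, to the alphabet letters that do not appear in the key.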
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return "".join(cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )
def __snake_case ( ):
__a = input('''Enter message to encode or decode: ''' ).strip()
__a = input('''Enter keyword: ''' ).strip()
__a = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
__a = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
__a = create_cipher_map(_UpperCAmelCase )
print(func(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 60 | 1 |
__snake_case :int = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 60 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__snake_case :List[Any] = None
__snake_case :Dict = logging.get_logger(__name__)
__snake_case :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case :Union[str, Any] = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__snake_case :Optional[Any] = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__snake_case :Optional[int] = '''▁'''
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : str = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : Dict = BarthezTokenizer
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = vocab_file
__a = False if not self.vocab_file else True
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(__SCREAMING_SNAKE_CASE):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__a = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE)
return (out_vocab_file,)
| 60 | 1 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' ,__UpperCAmelCase ,)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Dict = RobertaConfig
UpperCamelCase__ : Optional[Any] = '''roberta'''
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE)
__a = RobertaEmbeddings(__SCREAMING_SNAKE_CASE)
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''' ,__UpperCAmelCase ,)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Dict = RobertaConfig
UpperCamelCase__ : Tuple = '''roberta'''
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE)
__a = config.num_labels
__a = config.num_hidden_layers
__a = DeeRobertaModel(__SCREAMING_SNAKE_CASE)
__a = nn.Dropout(config.hidden_dropout_prob)
__a = nn.Linear(config.hidden_size , self.config.num_labels)
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : List[Any]=-1 , __SCREAMING_SNAKE_CASE : List[Any]=False , ):
'''simple docstring'''
__a = self.num_layers
try:
__a = self.roberta(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE , )
__a = outputs[1]
__a = self.dropout(__SCREAMING_SNAKE_CASE)
__a = self.classifier(__SCREAMING_SNAKE_CASE)
__a = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__a = e.message
__a = e.exit_layer
__a = outputs[0]
if not self.training:
__a = entropy(__SCREAMING_SNAKE_CASE)
__a = []
__a = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__a = MSELoss()
__a = loss_fct(logits.view(-1) , labels.view(-1))
else:
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
__a = []
for highway_exit in outputs[-1]:
__a = highway_exit[0]
if not self.training:
highway_logits_all.append(__SCREAMING_SNAKE_CASE)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
__a = MSELoss()
__a = loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
__a = CrossEntropyLoss()
__a = loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(__SCREAMING_SNAKE_CASE)
if train_highway:
__a = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
__a = (loss,) + outputs
if not self.training:
__a = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__a = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 60 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__snake_case :Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__snake_case :Optional[int] = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def __snake_case ( _UpperCAmelCase ):
__a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0]
@deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __snake_case ( _UpperCAmelCase ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
__a = _readaa(_UpperCAmelCase )
if magic != 2051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
__a = _readaa(_UpperCAmelCase )
__a = _readaa(_UpperCAmelCase )
__a = _readaa(_UpperCAmelCase )
__a = bytestream.read(rows * cols * num_images )
__a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta )
__a = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 )
return data
@deprecated(_UpperCAmelCase , '''Please use tf.one_hot on tensors.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = labels_dense.shape[0]
__a = numpy.arange(_UpperCAmelCase ) * num_classes
__a = numpy.zeros((num_labels, num_classes) )
__a = 1
return labels_one_hot
@deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=10 ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
__a = _readaa(_UpperCAmelCase )
if magic != 2049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
__a = _readaa(_UpperCAmelCase )
__a = bytestream.read(_UpperCAmelCase )
__a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase )
return labels
class _A :
@deprecated(
__SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Any=dtypes.floataa , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=None , ):
'''simple docstring'''
__a , __a = random_seed.get_seed(__SCREAMING_SNAKE_CASE)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda)
__a = dtypes.as_dtype(__SCREAMING_SNAKE_CASE).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype)
if fake_data:
__a = 10_000
__a = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'images.shape: {images.shape} labels.shape: {labels.shape}'
__a = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__a = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2])
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__a = images.astype(numpy.floataa)
__a = numpy.multiply(__SCREAMING_SNAKE_CASE , 1.0 / 2_55.0)
__a = images
__a = labels
__a = 0
__a = 0
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return self._images
@property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return self._labels
@property
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
return self._num_examples
@property
def _lowerCamelCase ( self : str):
'''simple docstring'''
return self._epochs_completed
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True):
'''simple docstring'''
if fake_data:
__a = [1] * 784
__a = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__SCREAMING_SNAKE_CASE)],
[fake_label for _ in range(__SCREAMING_SNAKE_CASE)],
)
__a = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__a = numpy.arange(self._num_examples)
numpy.random.shuffle(__SCREAMING_SNAKE_CASE)
__a = self.images[perma]
__a = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__a = self._num_examples - start
__a = self._images[start : self._num_examples]
__a = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__a = numpy.arange(self._num_examples)
numpy.random.shuffle(__SCREAMING_SNAKE_CASE)
__a = self.images[perm]
__a = self.labels[perm]
# Start next epoch
__a = 0
__a = batch_size - rest_num_examples
__a = self._index_in_epoch
__a = self._images[start:end]
__a = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0),
)
else:
self._index_in_epoch += batch_size
__a = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_UpperCAmelCase , '''Please write your own downloading logic.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not gfile.Exists(_UpperCAmelCase ):
gfile.MakeDirs(_UpperCAmelCase )
__a = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not gfile.Exists(_UpperCAmelCase ):
urllib.request.urlretrieve(_UpperCAmelCase , _UpperCAmelCase ) # noqa: S310
with gfile.GFile(_UpperCAmelCase ) as f:
__a = f.size()
print('''Successfully downloaded''' , _UpperCAmelCase , _UpperCAmelCase , '''bytes.''' )
return filepath
@deprecated(
_UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=dtypes.floataa , _UpperCAmelCase=True , _UpperCAmelCase=5000 , _UpperCAmelCase=None , _UpperCAmelCase=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase )
__a = fake()
__a = fake()
__a = fake()
return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
if not source_url: # empty string check
__a = DEFAULT_SOURCE_URL
__a = '''train-images-idx3-ubyte.gz'''
__a = '''train-labels-idx1-ubyte.gz'''
__a = '''t10k-images-idx3-ubyte.gz'''
__a = '''t10k-labels-idx1-ubyte.gz'''
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_images(_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_images(_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase )
if not 0 <= validation_size <= len(_UpperCAmelCase ):
__a = (
'''Validation size should be between 0 and '''
f'{len(_UpperCAmelCase )}. Received: {validation_size}.'
)
raise ValueError(_UpperCAmelCase )
__a = train_images[:validation_size]
__a = train_labels[:validation_size]
__a = train_images[validation_size:]
__a = train_labels[validation_size:]
__a = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
| 60 | 1 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
__snake_case :List[Any] = '''zero2'''
__snake_case :Optional[Any] = '''zero3'''
__snake_case :str = [ZEROa, ZEROa]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    __a = parameterized.to_safe_name('''_'''.join(str(x) for x in param.args ) )
return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
__snake_case :List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _A ( __UpperCAmelCase ):
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = models[model]
__a = self.run_trainer(
stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
self.do_checks(__SCREAMING_SNAKE_CASE)
return output_dir
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE)
__a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__a = self.get_launcher(__SCREAMING_SNAKE_CASE)
__a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env())
return output_dir
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False):
'''simple docstring'''
__a = min(2 , get_gpu_count()) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 60 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ):
'''simple docstring'''
__a = size if size is not None else {'''height''': 20, '''width''': 20}
__a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_center_crop
__a = crop_size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_reduce_labels
def _lowerCamelCase ( self : str):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __snake_case ( ):
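    # Load one (image, segmentation map) pair from the ADE20k test fixtures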
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(dataset[0]['''file'''] )
__a = Image.open(dataset[1]['''file'''] )
return image, map
def __snake_case ( ):
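    # Load two (image, segmentation map) pairs from the ADE20k test fixtures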
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(ds[0]['''file'''] )
__a = Image.open(ds[1]['''file'''] )
__a = Image.open(ds[2]['''file'''] )
__a = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = BeitImageProcessingTester(self)
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std'''))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20})
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())
        # Test not batched input
        encoding = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched
        encoding = image_processing(image_inputs , maps , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test not batched input (PIL images)
        image , segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image , segmentation_map , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched input (PIL images)
        images , segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images , segmentation_maps , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image , segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image , segmentation_map , return_tensors='''pt''')
        self.assertTrue(encoding['''labels'''].min().item() >= 0)
        self.assertTrue(encoding['''labels'''].max().item() <= 150)
        image_processing.do_reduce_labels = True
        encoding = image_processing(image , segmentation_map , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
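# --- Illustrative sketch (not part of the test file above): how `do_reduce_labels`
# maps segmentation labels. The helper name `reduce_labels_example` and the plain
# numpy implementation are assumptions, but the mapping itself (background id 0
# becomes the 255 ignore index, all other class ids shift down by one) is exactly
# what the `<= 150` versus `<= 255` assertions above are checking.
import numpy as np

def reduce_labels_example(segmentation_map: np.ndarray) -> np.ndarray:
    label_map = segmentation_map.copy().astype(np.int64)
    label_map[label_map == 0] = 255      # background becomes the ignore index
    label_map = label_map - 1            # remaining class ids shift down by one
    label_map[label_map == 254] = 255    # keep the ignore index stable
    return label_map

# usage: a 2x2 map with background (0) and classes 1 and 150
# reduce_labels_example(np.array([[0, 1], [150, 0]])) -> [[255, 0], [149, 255]]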
| 60 | 1 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]=99 , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : Union[str, Any]=7 , __SCREAMING_SNAKE_CASE : Optional[Any]=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=32 , __SCREAMING_SNAKE_CASE : str=4 , __SCREAMING_SNAKE_CASE : Optional[int]=4 , __SCREAMING_SNAKE_CASE : int=30 , __SCREAMING_SNAKE_CASE : Any=0 , __SCREAMING_SNAKE_CASE : Optional[int]=1 , __SCREAMING_SNAKE_CASE : List[Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = decoder_seq_length
# For common tests
__a = self.decoder_seq_length
__a = is_training
__a = use_attention_mask
__a = use_labels
__a = vocab_size
__a = d_model
__a = d_model
__a = decoder_layers
__a = decoder_layers
__a = decoder_ffn_dim
__a = decoder_attention_heads
__a = decoder_attention_heads
__a = eos_token_id
__a = bos_token_id
__a = pad_token_id
__a = decoder_start_token_id
__a = use_cache
__a = max_position_embeddings
__a = None
__a = decoder_seq_length
__a = 2
__a = 1
    def prepare_config_and_inputs( self : List[str]):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
        config = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past( self : Tuple , config : Tuple , input_ids : int , attention_mask : Tuple , lm_labels : Any , ):
        '''simple docstring'''
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids , use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids , use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        past_key_values = outputs['''past_key_values''']
        # create hypothetical next token and extend next_input_ids with it
        next_tokens = ids_tensor((2, 1) , config.vocab_size - 1) + 1
        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        output_from_no_past = model(next_input_ids)['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values)['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3)
    def prepare_config_and_inputs_for_common( self : List[str]):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , attention_mask , lm_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Optional[int] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
UpperCamelCase__ : Optional[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ : int = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
UpperCamelCase__ : Optional[Any] = True
UpperCamelCase__ : int = False
    def setUp( self : List[str]):
        '''simple docstring'''
        self.model_tester = TrOCRStandaloneDecoderModelTester(self , is_training=False)
        self.config_tester = ConfigTester(self , config_class=TrOCRConfig)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return
@unittest.skip('''The model doesn\'t support left padding''') # and it's not used enough to be worth fixing :)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
pass
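# --- Hedged sketch of the cache-consistency check exercised above, reduced to
# its core idea: running a decoder on the full sequence must match running it
# on only the new tokens plus `past_key_values`. The `model` argument is a
# stand-in (assumption), not TrOCRDecoder itself.
import torch

def check_past_key_values_equivalence(model, input_ids, next_tokens, atol=1e-3):
    # full forward over the extended sequence
    next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
    full = model(next_input_ids)["last_hidden_state"]
    # incremental forward: only the new tokens, reusing the cached keys/values
    past = model(input_ids, use_cache=True)["past_key_values"]
    step = model(next_tokens, past_key_values=past)["last_hidden_state"]
    # the newly generated positions must agree between both code paths
    return torch.allclose(full[:, -next_tokens.shape[-1]:], step, atol=atol)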
| 60 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( TestCase ):
    def _create_example_records( self : int):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
        data_dict = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data_dict)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
        for i, r in enumerate(dset):
            self.assertDictEqual(r , example_records[i])
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
def _lowerCamelCase ( self : int): # checks what happens with missing columns
'''simple docstring'''
        records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(records)
self.assertDictEqual(dset[0] , {'''col_1''': 1})
self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
def _lowerCamelCase ( self : Optional[Any]): # checks if the type can be inferred from the second record
'''simple docstring'''
        records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(records)
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
        dset = Dataset.from_list([])
        self.assertEqual(len(dset) , 0)
self.assertListEqual(dset.column_names , [])
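# --- Minimal usage sketch of the behaviour the tests above exercise (assumes
# the `datasets` library is installed; the printed values follow directly from
# the assertions in the test class).
from datasets import Dataset

ds = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
print(ds[0])  # {'col_1': 1} - the first record fixes the column set
print(ds[1])  # {'col_1': None} - missing values are filled with None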
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_lxmert_fast'''] = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_lxmert'''] = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_lxmert'''] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
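# --- Hedged sketch of the lazy-import idea behind `_LazyModule`: a package can
# defer heavy submodule imports until attribute access by using PEP 562's
# module-level `__getattr__`. This is a simplified stand-in, not the actual
# `_LazyModule` implementation; the mapping below is illustrative.
import importlib

_LAZY_ATTRIBUTES = {"LxmertTokenizer": "tokenization_lxmert"}

def __getattr__(name):
    if name in _LAZY_ATTRIBUTES:
        module = importlib.import_module("." + _LAZY_ATTRIBUTES[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")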
| 60 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings( idx ):
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention( idx , cnt ):
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token( idx ):
    token = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint( cvt_model , image_size , cvt_file_name , pytorch_dump_folder_path ):
    img_labels_file = '''imagenet-1k-id2label.json'''
    num_labels = 1000
    repo_id = '''huggingface/label-files'''
    id2label = json.load(open(cached_download(hf_hub_url(repo_id , img_labels_file , repo_type='''dataset''' ) ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels , id2label=id2label , label2id=label2id )
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config )
    image_processor = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
    image_processor.size['''shortest_edge'''] = image_size
    original_weights = torch.load(cvt_file_name , map_location=torch.device('''cpu''' ) )
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth ) ):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx )
        list_of_state_dict = list_of_state_dict + embeddings(idx )
        for cnt in range(config.depth[idx] ):
            list_of_state_dict = list_of_state_dict + attention(idx , cnt )
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg )
    for i in range(len(list_of_state_dict ) ):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__snake_case :str = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
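# --- Hedged sketch of the renaming strategy used by the converter above: the
# helpers build (huggingface_key, original_key) pairs, and the converted state
# dict is just a re-keyed copy of the original weights. Names here are
# illustrative, not part of the script.
from collections import OrderedDict

def remap_state_dict(original_weights, rename_pairs):
    new_state_dict = OrderedDict()
    for new_key, old_key in rename_pairs:
        # copy each tensor under its Hugging Face name
        new_state_dict[new_key] = original_weights[old_key]
    return new_state_dict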
| 60 | 1 |
import heapq
import sys
import numpy as np
__snake_case :List[Any] = tuple[int, int]
class _A :
def __init__( self : str):
'''simple docstring'''
__a = []
__a = set()
    def minkey( self : int):
'''simple docstring'''
if not self.empty():
return self.elements[0][0]
else:
return float('''inf''')
    def empty( self : str):
'''simple docstring'''
return len(self.elements) == 0
    def put( self : Dict , item : Tuple , priority : Any):
        '''simple docstring'''
        if item not in self.set:
            heapq.heappush(self.elements , (priority, item))
            self.set.add(item)
else:
# update
# print("update", item)
            temp = []
            ((pri) , (x)) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                ((pri) , (x)) = heapq.heappop(self.elements)
temp.append((priority, item))
for pro, xxx in temp:
heapq.heappush(self.elements , (pro, xxx))
    def remove_element( self : List[Any] , item : int):
        '''simple docstring'''
        if item in self.set:
            self.set.remove(item)
            temp = []
            ((pro) , (x)) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                ((pro) , (x)) = heapq.heappop(self.elements)
for prito, yyy in temp:
heapq.heappush(self.elements , (prito, yyy))
    def top_show( self : str):
'''simple docstring'''
return self.elements[0][1]
    def get( self : Tuple):
'''simple docstring'''
        ((priority) , (item)) = heapq.heappop(self.elements)
        self.set.remove(item)
return (priority, item)
def consistent_heuristic( p , goal ):
    # euclidean distance
    a = np.array(p )
    b = np.array(goal )
    return np.linalg.norm(a - b )
def heuristic_a( p , goal ):
    # integer division by time variable
    return consistent_heuristic(p , goal ) // t
def heuristic_b( p , goal ):
    # manhattan distance
    return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key( start , i , goal , g_function ):
    ans = g_function[start] + Wa * heuristics[i](start , goal )
    return ans
def do_something( back_pointer , goal , start ):
    grid = np.chararray((n, n) )
    for i in range(n ):
        for j in range(n ):
            grid[i][j] = '''*'''
    for i in range(n ):
        for j in range(n ):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = '''#'''
    grid[0][(n - 1)] = '''-'''
    x = back_pointer[goal]
    while x != start:
        ((x_c) , (y_c)) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = '''-'''
        x = back_pointer[x]
    grid[(n - 1)][0] = '''-'''
for i in range(_UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
if (i, j) == (0, n - 1):
print(grid[i][j] , end=''' ''' )
print('''<-- End position''' , end=''' ''' )
else:
print(grid[i][j] , end=''' ''' )
print()
print('''^''' )
print('''Start position''' )
print()
print('''# is an obstacle''' )
print('''- is the path taken by algorithm''' )
print('''PATH TAKEN BY THE ALGORITHM IS:-''' )
__a = back_pointer[goal]
while x != start:
print(_UpperCAmelCase , end=''' ''' )
__a = back_pointer[x]
print(_UpperCAmelCase )
sys.exit()
def valid( p ):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state( s , j , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , ):
    for itera in range(n_heuristic ):
        open_list[itera].remove_element(s )
    # print("s", s)
    # print("j", j)
    ((x) , (y)) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)
    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours ) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours )
                back_pointer[neighbours] = -1
                g_function[neighbours] = float('''inf''' )
                if valid(neighbours ) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours , key(neighbours , 0 , goal , g_function ) )
                    if neighbours not in close_list_inad:
                        for var in range(1 , n_heuristic ):
                            if key(neighbours , var , goal , g_function ) <= Wa * key(
                                neighbours , 0 , goal , g_function ):
                                open_list[j].put(
                                    neighbours , key(neighbours , var , goal , g_function ) )
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_b}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()
blocks = blocks_blk
# hyper parameters
Wa = 1  # weight shared by the anchor and inadmissible searches
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)
t = 1
def multi_a_star( start , goal , n_heuristic ):
    g_function = {start: 0, goal: float('''inf''' )}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()
    for i in range(n_heuristic ):
        open_list.append(PriorityQueue() )
        open_list[i].put(start , key(start , i , goal , g_function ) )
    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float('''inf''' ):
        for i in range(1 , n_heuristic ):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s )
                    expand_state(
                        get_s , i , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                    close_list_inad.append(get_s )
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float('''inf''' ):
                        do_something(back_pointer , goal , start )
                    else:
                        get_s = open_list[0].top_show()
                        visited.add(get_s )
                        expand_state(
                            get_s , 0 , visited , g_function , close_list_anchor , close_list_inad , open_list , back_pointer , )
                        close_list_anchor.append(get_s )
    print('''No path found to goal''' )
    print()
    for i in range(n - 1 , -1 , -1 ):
        for j in range(n ):
            if (j, i) in blocks:
                print('''#''' , end=''' ''' )
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print('''*''' , end=''' ''' )
                else:
                    print('''-''' , end=''' ''' )
            else:
                print('''*''' , end=''' ''' )
            if (j, i) == (n - 1, n - 1):
                print('''<-- End position''' , end=''' ''' )
        print()
    print('''^''' )
    print('''Start position''' )
    print()
    print('''# is an obstacle''' )
    print('''- is the path taken by algorithm''' )
if __name__ == "__main__":
    multi_a_star(start, goal, n_heuristic)
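# --- Worked example (hedged, not part of the algorithm file): the priority used
# by each search is key(s, i) = g(s) + Wa * h_i(s, goal). For the 20x20 grid
# above with start = (0, 0) and goal = (19, 19), the three heuristics give:
#   consistent_heuristic(start, goal) = sqrt(19**2 + 19**2) ~= 26.87 (euclidean)
#   heuristic_a(start, goal)          = 26.87 // t = 26.0 for t = 1
#   heuristic_b(start, goal)          = |19| + |19| = 38 (manhattan)
# so with g(start) = 0 and Wa = 1 the initial keys are roughly 26.87, 26.0, 38.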
| 60 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess( image ):
    w , h = image.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0
class _A ( __UpperCAmelCase ):
    def __init__( self : Any , vqvae : VQModel , unet : UNet2DModel , scheduler : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
'''simple docstring'''
super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler)
@torch.no_grad()
    def __call__( self : List[Any] , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        '''simple docstring'''
        if isinstance(image , PIL.Image.Image):
            batch_size = 1
        elif isinstance(image , torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}')
        if isinstance(image , PIL.Image.Image):
            image = preprocess(image)
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype)
        image = image.to(device=self.device , dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['''eta'''] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input , t)
            # predict the noise residual
            noise_pred = self.unet(latents_input , t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image , -1.0 , 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0 , 2 , 3 , 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
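# --- Hedged note on `preprocess` above: `x - x % 32` floors each side to a
# multiple of 32 so the VQ-VAE/U-Net downsampling stages divide evenly, e.g.:
#   w, h = 511, 383  ->  w - w % 32, h - h % 32  ==  480, 352
# and pixel values are rescaled from [0, 255] to [-1, 1] via 2 * (x / 255) - 1.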
| 60 | 1 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
__snake_case :Tuple = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
__snake_case :Tuple = 10
__snake_case :str = 256
def get_min_hash( tokens ):
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code ):
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__( self : Optional[Any] , *,
        duplication_jaccard_threshold : float = 0.85 , ):
        '''simple docstring'''
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add( self : List[str] , code_key : Tuple , min_hash : MinHash):
        '''simple docstring'''
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F'Duplicate key {code_key}')
            return
        self._index.insert(code_key , min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters( self : List[Any]):
        '''simple docstring'''
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{'''base_index''': el[0], '''repo_name''': el[1], '''path''': el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save( self : Optional[int] , filepath : Union[str, Any]):
        '''simple docstring'''
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , '''w''') as f:
            json.dump(duplicate_clusters , f)
def _compute_min_hash( element ):
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data['''content'''] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator , jaccard_threshold ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code_a , code_b ):
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1['''base_index''']]['''content''']
        for element2 in extremes:
            code2 = _shared_dataset[element2['''base_index''']]['''content''']
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset , jaccard_threshold = 0.85 ):
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x['''base_index'''] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element['''base_index''']] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element['''is_extreme'''] = element['''base_index'''] in extreme_dict
            if element["is_extreme"]:
                element['''copies'''] = extreme_dict[element['''base_index''']]['''copies''']
    print(f'Original dataset size: {len(dataset )}' )
    print(f'Number of duplicate clusters: {len(duplicate_clusters )}' )
    print(f'Files in duplicate cluster: {len(duplicate_indices )}' )
    print(f'Unique files in duplicate cluster: {len(extreme_dict )}' )
    print(f'Filtered dataset size: {len(ds_filter )}' )
return ds_filter, duplicate_clusters
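# --- Hedged usage sketch of the MinHash machinery above (requires the
# `datasketch` package; the token sets below are illustrative).
from datasketch import MinHash

def estimate_jaccard(tokens_a, tokens_b, num_perm=256):
    m_a, m_b = MinHash(num_perm=num_perm), MinHash(num_perm=num_perm)
    for t in tokens_a:
        m_a.update(t.encode())
    for t in tokens_b:
        m_b.update(t.encode())
    return m_a.jaccard(m_b)  # approximates |A & B| / |A | B|

# e.g. estimate_jaccard({"def", "foo", "return"}, {"def", "bar", "return"})
# should land near the exact Jaccard similarity of 2 / 4 = 0.5.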
| 60 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__snake_case :Any = TypeVar('''KT''')
__snake_case :List[str] = TypeVar('''VT''')
class _A ( Generic[KT, VT] ):
    def __init__( self : Dict , key : KT | str = "root" , value : VT | None = None):
        '''simple docstring'''
        self.key = key
        self.value = value
        self.forward = []
def __repr__( self : Dict):
'''simple docstring'''
return F'Node({self.key}: {self.value})'
@property
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
return len(self.forward)
class _A ( Generic[KT, VT] ):
    def __init__( self : Union[str, Any] , p : float = 0.5 , max_level : int = 16):
        '''simple docstring'''
        self.head = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level
def __str__( self : Union[str, Any]):
'''simple docstring'''
        items = list(self)
        if len(items) == 0:
            return F'SkipList(level={self.level})'
        label_size = max((len(str(item)) for item in items) , default=4)
        label_size = max(label_size , 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(F'[{node.key}]'.ljust(label_size , '''-''') + '''* ''' * len(forwards))
        lines.append(''' ''' * label_size + '''| ''' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                F'[{node.key}]'.ljust(label_size , '''-''')
                + ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
            lines.append(''' ''' * label_size + '''| ''' * len(forwards))
            forwards = node.forward
        lines.append('''None'''.ljust(label_size) + '''* ''' * len(forwards))
        return F'SkipList(level={self.level})\n' + "\n".join(lines)
def __iter__( self : int):
'''simple docstring'''
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]
    def random_level( self : List[Any]):
        '''simple docstring'''
        level = 1
while random() < self.p and level < self.max_level:
level += 1
return level
    def _locate_node( self : Tuple , key : Union[str, Any]):
        '''simple docstring'''
        update_vector = []
        node = self.head
for i in reversed(range(self.level)):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__SCREAMING_SNAKE_CASE)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
    def delete( self : Dict , key : KT):
        '''simple docstring'''
        node , update_vector = self._locate_node(key)
if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]
    def insert( self : List[str] , key : KT , value : VT):
        '''simple docstring'''
        node , update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key , value)
for i, update_node in enumerate(update_vector[:level]):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i])
if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node
    def find( self : Union[str, Any] , key : VT):
        '''simple docstring'''
        node , _ = self._locate_node(key)
if node is not None:
return node.value
return None
def test_insert():
    skip_list = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    assert len(all_values ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def test_insert_overrides_existing_value():
    skip_list = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value
    if len(all_values ) != 4:
        print()
    assert len(all_values ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find('''Some key''' ) is None
def test_search():
    skip_list = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete('''Some key''' )
    assert len(skip_list.head.forward ) == 0
def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def test_delete_removes_only_given_key():
    skip_list = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert('''Key1''' , 12 )
    skip_list.insert('''V''' , 13 )
    skip_list.insert('''X''' , 142 )
    skip_list.insert('''Key2''' , 15 )
    skip_list.delete('''X''' )
    def traverse_keys(node ):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def test_iter_always_yields_sorted_values():
    def is_sorted(lst ):
        return all(next_item >= item for item, next_item in zip(lst , lst[1:] ) )
    skip_list = SkipList()
    for i in range(10 ):
        skip_list.insert(i , i )
    assert is_sorted(list(skip_list ) )
    skip_list.delete(5 )
    skip_list.delete(8 )
    skip_list.delete(2 )
    assert is_sorted(list(skip_list ) )
    skip_list.insert(-12 , -12 )
    skip_list.insert(77 , 77 )
    assert is_sorted(list(skip_list ) )
def pytests():
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def main():
    skip_list = SkipList()
    skip_list.insert(2 , '''2''' )
    skip_list.insert(4 , '''4''' )
    skip_list.insert(6 , '''4''' )
    skip_list.insert(4 , '''5''' )
    skip_list.insert(8 , '''4''' )
    skip_list.insert(9 , '''4''' )
    skip_list.delete(4 )
    print(skip_list )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
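# --- Hedged note on `random_level` above: with promotion probability p, a node
# reaches level k with probability roughly p**(k - 1), so the expected number
# of levels per node is 1 / (1 - p) = 2.0 for p = 0.5; this geometric level
# distribution is what keeps search O(log n) in expectation. A quick empirical
# check (the helper name is illustrative):
from random import random

def sample_level(p=0.5, max_level=16):
    level = 1
    while random() < p and level < max_level:
        level += 1
    return level

# average over many samples should approach 1 / (1 - p) = 2.0 for p = 0.5
print(sum(sample_level() for _ in range(100000)) / 100000)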
| 60 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class _A ( __UpperCAmelCase ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict=13 , __SCREAMING_SNAKE_CASE : str=7 , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : List[Any]=True , __SCREAMING_SNAKE_CASE : Optional[Any]=False , __SCREAMING_SNAKE_CASE : int=True , __SCREAMING_SNAKE_CASE : List[str]=99 , __SCREAMING_SNAKE_CASE : Dict=32 , __SCREAMING_SNAKE_CASE : str=5 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Any="gelu" , __SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : Optional[Any]=512 , __SCREAMING_SNAKE_CASE : Union[str, Any]=16 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : List[str]=0.02 , __SCREAMING_SNAKE_CASE : int=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : List[str]=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length])
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__a = ids_tensor([self.batch_size] , self.num_choices)
__a = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : str):
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = DistilBertModel(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = model(__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = DistilBertForMaskedLM(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = DistilBertForQuestionAnswering(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , start_positions=__SCREAMING_SNAKE_CASE , end_positions=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = self.num_labels
__a = DistilBertForSequenceClassification(__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = self.num_labels
__a = DistilBertForTokenClassification(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = self.num_choices
__a = DistilBertForMultipleChoice(config=__SCREAMING_SNAKE_CASE)
model.to(__SCREAMING_SNAKE_CASE)
model.eval()
__a = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__a = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__a = model(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = self.prepare_config_and_inputs()
((__a) , (__a) , (__a) , (__a) , (__a) , (__a)) = config_and_inputs
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _A ( __UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Optional[int] = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase__ : List[str] = (
{
'''feature-extraction''': DistilBertModel,
'''fill-mask''': DistilBertForMaskedLM,
'''question-answering''': DistilBertForQuestionAnswering,
'''text-classification''': DistilBertForSequenceClassification,
'''token-classification''': DistilBertForTokenClassification,
'''zero-shot''': DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ : Any = True
UpperCamelCase__ : List[Any] = True
UpperCamelCase__ : str = True
UpperCamelCase__ : Optional[int] = True
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = DistilBertModelTester(self)
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , dim=37)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = DistilBertModel.from_pretrained(__SCREAMING_SNAKE_CASE)
self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@slow
@require_torch_gpu
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
__a = True
__a = model_class(config=__SCREAMING_SNAKE_CASE)
__a = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = torch.jit.trace(
__SCREAMING_SNAKE_CASE , (inputs_dict['''input_ids'''].to('''cpu'''), inputs_dict['''attention_mask'''].to('''cpu''')))
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , '''traced_model.pt'''))
__a = torch.jit.load(os.path.join(__SCREAMING_SNAKE_CASE , '''traced_model.pt''') , map_location=__SCREAMING_SNAKE_CASE)
loaded(inputs_dict['''input_ids'''].to(__SCREAMING_SNAKE_CASE) , inputs_dict['''attention_mask'''].to(__SCREAMING_SNAKE_CASE))
@require_torch
class _A ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = DistilBertModel.from_pretrained('''distilbert-base-uncased''')
__a = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
__a = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
with torch.no_grad():
__a = model(__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE)[0]
__a = torch.Size((1, 11, 768))
self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE)
__a = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]])
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __SCREAMING_SNAKE_CASE , atol=1E-4))
| 60 |
__snake_case :str = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    # BFS on the residual graph: return True if the sink t is reachable from the source s.
__a = [False] * len(_UpperCAmelCase )
__a = [s]
__a = True
while queue:
__a = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCAmelCase )
__a = True
__a = u
return visited[t]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
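    # Edmonds-Karp max-flow; once no augmenting path remains, the saturated edges form the minimum cut.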
__a = [-1] * (len(_UpperCAmelCase ))
__a = 0
__a = []
    __a = [i[:] for i in graph]  # Keep a copy of the original capacities.
while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = float('''Inf''' )
__a = sink
while s != source:
            # Find the minimum residual capacity along the selected path
__a = min(_UpperCAmelCase , graph[parent[s]][s] )
__a = parent[s]
max_flow += path_flow
__a = sink
while v != source:
__a = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__a = parent[v]
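    # Edges that had positive capacity originally but are now saturated lie on the minimum cut.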
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 60 | 1 |
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__snake_case :str = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
__snake_case :Optional[Any] = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
__snake_case :Optional[Any] = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
__snake_case :Optional[Any] = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
__snake_case :Tuple = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
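# Rewrites a TF variable name into its Hugging Face equivalent by applying the (tf, hf) patterns in order.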
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
for tf_name, hf_name in patterns:
__a = k.replace(_UpperCAmelCase , _UpperCAmelCase )
return k
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = BigBirdPegasusConfig(**_UpperCAmelCase )
__a = BigBirdPegasusForConditionalGeneration(_UpperCAmelCase )
__a = torch_model.state_dict()
__a = {}
# separating decoder weights
__a = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )}
__a = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )}
for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ):
__a = [k.endswith(_UpperCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(_UpperCAmelCase ):
continue
__a = DECODER_PATTERNS
__a = rename_state_dict_key(_UpperCAmelCase , _UpperCAmelCase )
if new_k not in state_dict:
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
__a = v.T
__a = torch.from_numpy(_UpperCAmelCase )
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ):
__a = [k.endswith(_UpperCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(_UpperCAmelCase ):
continue
__a = REMAINING_PATTERNS
__a = rename_state_dict_key(_UpperCAmelCase , _UpperCAmelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ):
__a = v.T
__a = torch.from_numpy(_UpperCAmelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
__a = mapping['''model.embed_positions.weight''']
__a = mapping.pop('''model.embed_positions.weight''' )
__a , __a = torch_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
__a = [
k
for k in missing
if k
not in [
'''final_logits_bias''',
'''model.encoder.embed_tokens.weight''',
'''model.decoder.embed_tokens.weight''',
'''lm_head.weight''',
]
]
assert unexpected_missing == [], f'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], f'no matches found for the following tf keys {extra}'
return torch_model
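# Loads every variable from a TF checkpoint into a {name: numpy array} dict, skipping names on the ignore list.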
def __snake_case ( _UpperCAmelCase ):
__a = tf.train.list_variables(_UpperCAmelCase )
__a = {}
__a = ['''global_step''']
for name, shape in tqdm(_UpperCAmelCase , desc='''converting tf checkpoint to dict''' ):
__a = any(pat in name for pat in ignore_name )
if skip_key:
continue
__a = tf.train.load_variable(_UpperCAmelCase , _UpperCAmelCase )
__a = array
return tf_weights
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = get_tf_weights_as_numpy(_UpperCAmelCase )
__a = convert_bigbird_pegasus(_UpperCAmelCase , _UpperCAmelCase )
torch_model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__snake_case :Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__snake_case :int = parser.parse_args()
__snake_case :str = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 60 |
from __future__ import annotations
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
print(f'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(_UpperCAmelCase ):
print(f'{i}\t\t{d}' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
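    # One extra relaxation pass: if any edge can still be relaxed, the graph contains a negative-weight cycle.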
for j in range(_UpperCAmelCase ):
__a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
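    # Bellman-Ford: relax every edge vertex_count - 1 times, then check for negative cycles.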
__a = [float('''inf''' )] * vertex_count
__a = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_UpperCAmelCase ):
__a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__a = distance[u] + w
__a = check_negative_cycle(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case :Dict = int(input('''Enter number of vertices: ''').strip())
__snake_case :Any = int(input('''Enter number of edges: ''').strip())
__snake_case :list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
__snake_case ,__snake_case ,__snake_case :int = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
__snake_case :Any = {'''src''': src, '''dst''': dest, '''weight''': weight}
__snake_case :List[str] = int(input('''\nEnter shortest path source:''').strip())
__snake_case :Optional[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 60 | 1 |
def __snake_case ( _UpperCAmelCase ):
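    # Builds the first n terms of the harmonic series as strings: ['1', '1/2', '1/3', ...].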
if n_term == "":
return []
__a = []
for temp in range(int(_UpperCAmelCase ) ):
series.append(f'1/{temp + 1}' if series else '''1''' )
return series
if __name__ == "__main__":
__snake_case :Optional[Any] = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 60 |
import os
import sys
import unittest
__snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''')
__snake_case :Any = '''
{0} = None
'''
__snake_case :Dict = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
__snake_case :str = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
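        # find_backend parses an ``if not is_xxx_available():`` guard and returns the backend name(s) it protects.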
__a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(__SCREAMING_SNAKE_CASE)
__a = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''')
__a = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''')
__a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''')
__a = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''')
__a = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''')
__a = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
__SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
__a = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__a = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
| 60 | 1 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__snake_case :int = ['''text''', '''image''', '''audio''']
def __snake_case ( _UpperCAmelCase ):
__a = []
for input_type in input_types:
if input_type == "text":
inputs.append('''Text input''' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png''' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
inputs.append(create_inputs(_UpperCAmelCase ) )
else:
raise ValueError(f'Invalid type requested: {input_type}' )
return inputs
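# Maps each runtime output back to one of the agent I/O types: "text", "image", or "audio".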
def __snake_case ( _UpperCAmelCase ):
__a = []
for output in outputs:
if isinstance(_UpperCAmelCase , (str, AgentText) ):
output_types.append('''text''' )
elif isinstance(_UpperCAmelCase , (Image.Image, AgentImage) ):
output_types.append('''image''' )
elif isinstance(_UpperCAmelCase , (torch.Tensor, AgentAudio) ):
output_types.append('''audio''' )
else:
raise ValueError(f'Invalid output: {output}' )
return output_types
@is_tool_test
class _A :
def _lowerCamelCase ( self : str):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''inputs'''))
self.assertTrue(hasattr(self.tool , '''outputs'''))
__a = self.tool.inputs
for _input in inputs:
if isinstance(_input , __SCREAMING_SNAKE_CASE):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
__a = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = create_inputs(self.tool.inputs)
__a = self.tool(*__SCREAMING_SNAKE_CASE)
# There is a single output
if len(self.tool.outputs) == 1:
__a = [outputs]
self.assertListEqual(output_types(__SCREAMING_SNAKE_CASE) , self.tool.outputs)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , '''description'''))
self.assertTrue(hasattr(self.tool , '''default_checkpoint'''))
self.assertTrue(self.tool.description.startswith('''This is a tool that'''))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = create_inputs(self.tool.inputs)
__a = self.tool(*__SCREAMING_SNAKE_CASE)
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , len(self.tool.outputs))
for output, output_type in zip(__SCREAMING_SNAKE_CASE , self.tool.outputs):
__a = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = create_inputs(self.tool.inputs)
__a = []
for _input, input_type in zip(__SCREAMING_SNAKE_CASE , self.tool.inputs):
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
__a = self.tool(*__SCREAMING_SNAKE_CASE)
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = [outputs]
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , len(self.tool.outputs))
| 60 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__snake_case :str = get_logger()
__snake_case :Optional[dict] = None
class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
super().__init__(features=__SCREAMING_SNAKE_CASE)
import jax
from jaxlib.xla_client import Device
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(
                F'Expected {device} to be a `str`, not {type(__SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` '
                '''is not serializable with either `pickle` or `dill`. Instead you can surround '''
                '''the device with `str()` to get its string identifier, which will be internally mapped '''
                '''to the actual `jaxlib.xla_extension.Device`.''')
__a = device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else str(jax.devices()[0])
        # Use a module-level global because `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`.
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__a = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
F'device: {str(jax.devices()[0])}.')
__a = str(jax.devices()[0])
__a = jnp_array_kwargs
@staticmethod
def _lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(__SCREAMING_SNAKE_CASE): device for device in jax.devices()}
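    # Likely the column-consolidation step: stacks a column of same-shape, same-dtype jax arrays into one batched array; otherwise returns it as-is.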
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and column:
if all(
isinstance(__SCREAMING_SNAKE_CASE , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0)
return column
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
import jax
import jax.numpy as jnp
        if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(None))):
return value
elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
__a = {}
if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                __a = {'''dtype''': jnp.int64}
            else:
                __a = {'''dtype''': jnp.int32}
        elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
            __a = {'''dtype''': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = np.asarray(__SCREAMING_SNAKE_CASE)
        # Use a module-level global because `jaxlib.xla_extension.Device` is not serializable
        # with either `pickle` or `dill`.
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__a = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs})
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''') and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array):
__a = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct])
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)):
return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct])
return self._tensorize(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : dict):
'''simple docstring'''
return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
__a = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE)
__a = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE)
return self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
__a = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE)
__a = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0])
__a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
__a = self._consolidate(__SCREAMING_SNAKE_CASE)
return column
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
__a = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE)
__a = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE)
__a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
for column_name in batch:
__a = self._consolidate(batch[column_name])
return batch
| 60 | 1 |
from __future__ import annotations
def __snake_case ( _UpperCAmelCase ):
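    # A1Z26 cipher: each lowercase letter maps to its position in the alphabet (a=1, ..., z=26).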
return [ord(_UpperCAmelCase ) - 96 for elem in plain]
def __snake_case ( _UpperCAmelCase ):
return "".join(chr(elem + 96 ) for elem in encoded )
def __snake_case ( ):
__a = encode(input('''-> ''' ).strip().lower() )
print('''Encoded: ''' , _UpperCAmelCase )
print('''Decoded:''' , decode(_UpperCAmelCase ) )
if __name__ == "__main__":
main()
| 60 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :Tuple = logging.getLogger(__name__)
if __name__ == "__main__":
__snake_case :Union[str, Any] = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0522, type=int)
__snake_case :List[str] = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, '''rb''') as fp:
__snake_case :Optional[Any] = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
__snake_case :Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
__snake_case :Optional[Any] = [0] * args.vocab_size
for k, v in counter.items():
__snake_case :Any = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 60 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2 as cva  # OpenCV; aliased so the cva.* calls below resolve
import numpy as np
# Parameters
__snake_case :Any = (720, 1280) # Height, Width
__snake_case :Optional[int] = (0.4, 0.6) # If height or width is lower than this scale, drop it.
__snake_case :int = 1 / 100
__snake_case :int = ''''''
__snake_case :str = ''''''
__snake_case :int = ''''''
__snake_case :List[str] = 250
def __snake_case ( ):
__a , __a = get_dataset(_UpperCAmelCase , _UpperCAmelCase )
for index in range(_UpperCAmelCase ):
__a = random.sample(range(len(_UpperCAmelCase ) ) , 4 )
__a , __a , __a = update_image_and_anno(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , filter_scale=_UpperCAmelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
__a = random_chars(32 )
__a = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
__a = f'{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'
cva.imwrite(f'{file_root}.jpg' , _UpperCAmelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}' )
__a = []
for anno in new_annos:
__a = anno[3] - anno[1]
__a = anno[4] - anno[2]
__a = anno[1] + width / 2
__a = anno[2] + height / 2
__a = f'{anno[0]} {x_center} {y_center} {width} {height}'
annos_list.append(_UpperCAmelCase )
with open(f'{file_root}.txt' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
__a = []
for label_file in glob.glob(os.path.join(_UpperCAmelCase , '''*.txt''' ) ):
__a = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(_UpperCAmelCase ) as in_file:
__a = in_file.readlines()
__a = os.path.join(_UpperCAmelCase , f'{label_name}.jpg' )
__a = []
for obj_list in obj_lists:
__a = obj_list.rstrip('''\n''' ).split(''' ''' )
__a = float(obj[1] ) - float(obj[3] ) / 2
__a = float(obj[2] ) - float(obj[4] ) / 2
__a = float(obj[1] ) + float(obj[3] ) / 2
__a = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(_UpperCAmelCase )
labels.append(_UpperCAmelCase )
return img_paths, labels
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 0.0 , ):
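    # Stitches four images into one mosaic canvas and rescales each bounding box into the combined frame.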
    __a = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
__a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__a = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
__a = int(scale_x * output_size[1] )
__a = int(scale_y * output_size[0] )
__a = []
__a = []
for i, index in enumerate(_UpperCAmelCase ):
__a = all_img_list[index]
path_list.append(_UpperCAmelCase )
__a = all_annos[index]
__a = cva.imread(_UpperCAmelCase )
if i == 0: # top-left
__a = cva.resize(_UpperCAmelCase , (divid_point_x, divid_point_y) )
__a = img
for bbox in img_annos:
__a = bbox[1] * scale_x
__a = bbox[2] * scale_y
__a = bbox[3] * scale_x
__a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
__a = cva.resize(_UpperCAmelCase , (output_size[1] - divid_point_x, divid_point_y) )
__a = img
for bbox in img_annos:
__a = scale_x + bbox[1] * (1 - scale_x)
__a = bbox[2] * scale_y
__a = scale_x + bbox[3] * (1 - scale_x)
__a = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
__a = cva.resize(_UpperCAmelCase , (divid_point_x, output_size[0] - divid_point_y) )
__a = img
for bbox in img_annos:
__a = bbox[1] * scale_x
__a = scale_y + bbox[2] * (1 - scale_y)
__a = bbox[3] * scale_x
__a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
__a = cva.resize(
_UpperCAmelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
__a = img
for bbox in img_annos:
__a = scale_x + bbox[1] * (1 - scale_x)
__a = scale_y + bbox[2] * (1 - scale_y)
__a = scale_x + bbox[3] * (1 - scale_x)
__a = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
__a = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __snake_case ( _UpperCAmelCase ):
    assert number_char > 1, "The number of characters should be greater than 1"
__a = ascii_lowercase + digits
return "".join(random.choice(_UpperCAmelCase ) for _ in range(_UpperCAmelCase ) )
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 60 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel as UNetaDModel  # real class name is UNet2DModel; alias keeps the references below working
__snake_case :List[str] = HfApi()
__snake_case :str = {}
# fmt: off
__snake_case :Optional[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__snake_case :Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__snake_case :str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__snake_case :List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__snake_case :Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__snake_case :List[str] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__snake_case :Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__snake_case :Tuple = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__snake_case :List[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__snake_case :Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__snake_case :List[str] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__snake_case :Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__snake_case :List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__snake_case :Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
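# For each diffusers UNet checkpoint, run a fixed-seed forward pass and compare against the expected slices above.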
__snake_case :List[Any] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
__snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__snake_case :str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__snake_case :List[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__snake_case :Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 60 | 1 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :int = logging.getLogger(__name__)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = np.argmax(_UpperCAmelCase , axis=1 )
return np.sum(outputs == labels )
def __snake_case ( _UpperCAmelCase ):
with open(_UpperCAmelCase , encoding='''utf_8''' ) as f:
__a = csv.reader(_UpperCAmelCase )
__a = []
next(_UpperCAmelCase ) # skip the first line
for line in tqdm(_UpperCAmelCase ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
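    # Builds (input_ids, mc_token_ids, lm_labels, mc_labels) tensors; each story is paired with both candidate continuations.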
__a = []
for dataset in encoded_datasets:
__a = len(_UpperCAmelCase )
        __a = np.zeros((n_batch, 2, input_len) , dtype=np.int64 )
        __a = np.zeros((n_batch, 2) , dtype=np.int64 )
        __a = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64 )
        __a = np.zeros((n_batch,) , dtype=np.int64 )
        for i, (story, conta, contb, mc_label) in enumerate(_UpperCAmelCase ):
            __a = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
            __a = [start_token] + story[:cap_length] + [delimiter_token] + contb[:cap_length] + [clf_token]
__a = with_conta
__a = with_conta
__a = len(_UpperCAmelCase ) - 1
__a = len(_UpperCAmelCase ) - 1
__a = with_conta
__a = with_conta
__a = mc_label
__a = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(_UpperCAmelCase ) for t in all_inputs ) )
return tensor_datasets
def __snake_case ( ):
__a = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=_UpperCAmelCase , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=_UpperCAmelCase , default='''''' )
parser.add_argument('''--eval_dataset''' , type=_UpperCAmelCase , default='''''' )
parser.add_argument('''--seed''' , type=_UpperCAmelCase , default=42 )
parser.add_argument('''--num_train_epochs''' , type=_UpperCAmelCase , default=3 )
parser.add_argument('''--train_batch_size''' , type=_UpperCAmelCase , default=8 )
parser.add_argument('''--eval_batch_size''' , type=_UpperCAmelCase , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_UpperCAmelCase , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=_UpperCAmelCase , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=_UpperCAmelCase , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=_UpperCAmelCase , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=_UpperCAmelCase , default=6.2_5E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=_UpperCAmelCase , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=_UpperCAmelCase , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=_UpperCAmelCase , default=0.01 )
parser.add_argument('''--lm_coef''' , type=_UpperCAmelCase , default=0.9 )
parser.add_argument('''--n_valid''' , type=_UpperCAmelCase , default=374 )
parser.add_argument('''--server_ip''' , type=_UpperCAmelCase , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=_UpperCAmelCase , default='''''' , help='''Can be used for distant debugging.''' )
__a = parser.parse_args()
print(_UpperCAmelCase )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCAmelCase )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
__a = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
__a = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(_UpperCAmelCase , _UpperCAmelCase ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
__a = ['''_start_''', '''_delimiter_''', '''_classify_''']
__a = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(_UpperCAmelCase )
__a = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
__a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(_UpperCAmelCase ) )
model.to(_UpperCAmelCase )
# Load and encode the datasets
def tokenize_and_encode(_UpperCAmelCase ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCAmelCase ) )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
return obj
return [tokenize_and_encode(_UpperCAmelCase ) for o in obj]
logger.info('''Encoding dataset...''' )
__a = load_rocstories_dataset(args.train_dataset )
__a = load_rocstories_dataset(args.eval_dataset )
__a = (train_dataset, eval_dataset)
__a = tokenize_and_encode(_UpperCAmelCase )
# Compute the max input length for the Transformer
__a = model.config.n_positions // 2 - 2
__a = max(
        len(story[:max_length] ) + max(len(conta[:max_length] ) , len(contb[:max_length] ) ) + 3
for dataset in encoded_datasets
        for story, conta, contb, _ in dataset )
__a = min(_UpperCAmelCase , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
__a = pre_process_datasets(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase )
__a , __a = tensor_datasets[0], tensor_datasets[1]
__a = TensorDataset(*_UpperCAmelCase )
__a = RandomSampler(_UpperCAmelCase )
__a = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=args.train_batch_size )
__a = TensorDataset(*_UpperCAmelCase )
__a = SequentialSampler(_UpperCAmelCase )
__a = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
__a = args.max_steps
__a = args.max_steps // (len(_UpperCAmelCase ) // args.gradient_accumulation_steps) + 1
else:
__a = len(_UpperCAmelCase ) // args.gradient_accumulation_steps * args.num_train_epochs
__a = list(model.named_parameters() )
__a = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight''']
__a = [
{
'''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'''weight_decay''': args.weight_decay,
},
{'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0},
]
__a = AdamW(_UpperCAmelCase , lr=args.learning_rate , eps=args.adam_epsilon )
__a = get_linear_schedule_with_warmup(
_UpperCAmelCase , num_warmup_steps=args.warmup_steps , num_training_steps=_UpperCAmelCase )
if args.do_train:
__a , __a , __a = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
__a = 0
__a = 0
__a = tqdm(_UpperCAmelCase , desc='''Training''' )
for step, batch in enumerate(_UpperCAmelCase ):
__a = tuple(t.to(_UpperCAmelCase ) for t in batch )
__a , __a , __a , __a = batch
__a = model(_UpperCAmelCase , mc_token_ids=_UpperCAmelCase , lm_labels=_UpperCAmelCase , mc_labels=_UpperCAmelCase )
__a = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
__a = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
__a = '''Training loss: {:.2e} lr: {:.2e}'''.format(_UpperCAmelCase , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
__a = model.module if hasattr(_UpperCAmelCase , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
__a = os.path.join(args.output_dir , _UpperCAmelCase )
__a = os.path.join(args.output_dir , _UpperCAmelCase )
torch.save(model_to_save.state_dict() , _UpperCAmelCase )
model_to_save.config.to_json_file(_UpperCAmelCase )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
__a = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
__a = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(_UpperCAmelCase )
if args.do_eval:
model.eval()
__a , __a = 0, 0
__a , __a = 0, 0
for batch in tqdm(_UpperCAmelCase , desc='''Evaluating''' ):
__a = tuple(t.to(_UpperCAmelCase ) for t in batch )
__a , __a , __a , __a = batch
with torch.no_grad():
__a , __a , __a , __a = model(
_UpperCAmelCase , mc_token_ids=_UpperCAmelCase , lm_labels=_UpperCAmelCase , mc_labels=_UpperCAmelCase )
__a = mc_logits.detach().cpu().numpy()
__a = mc_labels.to('''cpu''' ).numpy()
__a = accuracy(_UpperCAmelCase , _UpperCAmelCase )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
__a = eval_loss / nb_eval_steps
__a = eval_accuracy / nb_eval_examples
__a = tr_loss / nb_tr_steps if args.do_train else None
__a = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss}
__a = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(_UpperCAmelCase , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , _UpperCAmelCase , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 60 |
from collections.abc import Generator
from math import sin
def __snake_case ( _UpperCAmelCase ):
if len(_UpperCAmelCase ) != 32:
raise ValueError('''Input must be of length 32''' )
__a = b''''''
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def __snake_case ( _UpperCAmelCase ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
__a = format(_UpperCAmelCase , '''08x''' )[-8:]
__a = b''''''
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' )
return little_endian_hex
def __snake_case ( _UpperCAmelCase ):
__a = b''''''
for char in message:
bit_string += format(_UpperCAmelCase , '''08b''' ).encode('''utf-8''' )
__a = format(len(_UpperCAmelCase ) , '''064b''' ).encode('''utf-8''' )
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(_UpperCAmelCase ) % 512 != 448:
bit_string += b"0"
bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
return bit_string
def __snake_case ( _UpperCAmelCase ):
if len(_UpperCAmelCase ) % 512 != 0:
raise ValueError('''Input must have length that\'s a multiple of 512''' )
for pos in range(0 , len(_UpperCAmelCase ) , 512 ):
__a = bit_string[pos : pos + 512]
__a = []
for i in range(0 , 512 , 32 ):
block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
yield block_words
def __snake_case ( _UpperCAmelCase ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
__a = format(_UpperCAmelCase , '''032b''' )
__a = ''''''
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(_UpperCAmelCase , 2 )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return (a + b) % 2**32
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if i < 0:
raise ValueError('''Input must be non-negative''' )
if shift < 0:
raise ValueError('''Shift must be non-negative''' )
return ((i << shift) ^ (i >> (32 - shift))) % 2**32
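# MD5: preprocess (pad) the message, then run 64 rounds over each 512-bit chunk, updating the four state words.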
def __snake_case ( _UpperCAmelCase ):
__a = preprocess(_UpperCAmelCase )
__a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
# Starting states
__a = 0X67_452_301
__a = 0Xef_cda_b89
__a = 0X98_bad_cfe
__a = 0X10_325_476
    __a = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(_UpperCAmelCase ):
__a = aa
__a = ba
__a = ca
__a = da
# Hash current chunk
for i in range(64 ):
if i <= 15:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
__a = d ^ (b & (c ^ d))
__a = i
elif i <= 31:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
__a = c ^ (d & (b ^ c))
__a = (5 * i + 1) % 16
elif i <= 47:
__a = b ^ c ^ d
__a = (3 * i + 5) % 16
else:
__a = c ^ (b | not_aa(_UpperCAmelCase ))
__a = (7 * i) % 16
__a = (f + a + added_consts[i] + block_words[g]) % 2**32
__a = d
__a = c
__a = b
__a = sum_aa(_UpperCAmelCase , left_rotate_aa(_UpperCAmelCase , shift_amounts[i] ) )
# Add hashed chunk to running total
__a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
__a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
__a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
__a = sum_aa(_UpperCAmelCase , _UpperCAmelCase )
__a = reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase )
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 60 | 1 |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def __snake_case ( _UpperCAmelCase ):
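    # Entropy of softmax(x), computed per example as log(sum(exp(x))) - sum(x * exp(x)) / sum(exp(x)).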
__a = torch.exp(_UpperCAmelCase )
__a = torch.sum(_UpperCAmelCase , dim=1 ) # sum of exp(x_i)
__a = torch.sum(x * exp_x , dim=1 ) # sum of x_i * exp(x_i)
return torch.log(_UpperCAmelCase ) - B / A
class _A ( nn.Module ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
super().__init__()
__a = config.output_attentions
__a = config.output_hidden_states
__a = nn.ModuleList([BertLayer(__SCREAMING_SNAKE_CASE) for _ in range(config.num_hidden_layers)])
__a = nn.ModuleList([BertHighway(__SCREAMING_SNAKE_CASE) for _ in range(config.num_hidden_layers)])
__a = [-1 for _ in range(config.num_hidden_layers)]
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
if (type(__SCREAMING_SNAKE_CASE) is float) or (type(__SCREAMING_SNAKE_CASE) is int):
for i in range(len(self.early_exit_entropy)):
__a = x
else:
__a = x
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : int=None , ):
'''simple docstring'''
__a = ()
__a = ()
__a = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
__a = all_hidden_states + (hidden_states,)
__a = layer_module(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , head_mask[i] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = layer_outputs[0]
if self.output_attentions:
__a = all_attentions + (layer_outputs[1],)
__a = (hidden_states,)
if self.output_hidden_states:
__a = current_outputs + (all_hidden_states,)
if self.output_attentions:
__a = current_outputs + (all_attentions,)
__a = self.highway[i](__SCREAMING_SNAKE_CASE)
# logits, pooled_output
if not self.training:
__a = highway_exit[0]
__a = entropy(__SCREAMING_SNAKE_CASE)
__a = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
__a = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
__a = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__SCREAMING_SNAKE_CASE , i + 1)
else:
__a = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
__a = all_hidden_states + (hidden_states,)
__a = (hidden_states,)
if self.output_hidden_states:
__a = outputs + (all_hidden_states,)
if self.output_attentions:
__a = outputs + (all_attentions,)
__a = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
'''The Bert Model transformer with early exiting (DeeBERT). ''' ,__UpperCAmelCase ,)
class _A ( __UpperCAmelCase ):
def __init__( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE)
__a = config
__a = BertEmbeddings(__SCREAMING_SNAKE_CASE)
__a = DeeBertEncoder(__SCREAMING_SNAKE_CASE)
__a = BertPooler(__SCREAMING_SNAKE_CASE)
self.init_weights()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler)
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return self.embeddings.word_embeddings
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = value
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__SCREAMING_SNAKE_CASE)
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : str=None , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''')
elif input_ids is not None:
__a = input_ids.size()
elif inputs_embeds is not None:
__a = inputs_embeds.size()[:-1]
else:
raise ValueError('''You have to specify either input_ids or inputs_embeds''')
__a = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
__a = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE)
if encoder_attention_mask is None:
__a = torch.ones(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE)
if token_type_ids is None:
__a = torch.zeros(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
__a = self.get_extended_attention_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
__a = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
__a = encoder_attention_mask[:, None, None, :]
__a = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
        __a = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
__a = self.get_head_mask(__SCREAMING_SNAKE_CASE , self.config.num_hidden_layers)
__a = self.embeddings(
input_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE)
__a = self.encoder(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , encoder_attention_mask=__SCREAMING_SNAKE_CASE , )
__a = encoder_outputs[0]
__a = self.pooler(__SCREAMING_SNAKE_CASE)
__a = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _A ( __UpperCAmelCase ):
def __init__( self : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a = message
__a = exit_layer # start from 1!
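# Hedged sketch of the control flow above: an inner loop aborts the forward
# pass by raising, and the caller recovers partial outputs from the exception.
# All names below are illustrative toys, not part of the model.
class _EarlyExit(Exception):
    def __init__(self, value, layer):
        self.value = value
        self.layer = layer

def _run_layers(x, layers, threshold=0.5):
    for i, fn in enumerate(layers, start=1):
        x = fn(x)
        if abs(x) < threshold:  # stand-in for the per-layer entropy test
            raise _EarlyExit(x, i)
    return x

try:
    _run_layers(1.0, [lambda v: v / 2] * 4)
except _EarlyExit as e:
    pass  # e.layer == 2, e.value == 0.25 for this toy input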
class _A ( nn.Module ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
super().__init__()
__a = BertPooler(__SCREAMING_SNAKE_CASE)
__a = nn.Dropout(config.hidden_dropout_prob)
__a = nn.Linear(config.hidden_size , config.num_labels)
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a = encoder_outputs[0]
__a = self.pooler(__SCREAMING_SNAKE_CASE)
# "return" pooler_output
# BertModel
__a = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
__a = bmodel_output[1]
__a = self.dropout(__SCREAMING_SNAKE_CASE)
__a = self.classifier(__SCREAMING_SNAKE_CASE)
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. ''' ,__UpperCAmelCase ,)
class _A ( __UpperCAmelCase ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
super().__init__(__SCREAMING_SNAKE_CASE)
__a = config.num_labels
__a = config.num_hidden_layers
__a = DeeBertModel(__SCREAMING_SNAKE_CASE)
__a = nn.Dropout(config.hidden_dropout_prob)
__a = nn.Linear(config.hidden_size , self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=-1 , __SCREAMING_SNAKE_CASE : str=False , ):
'''simple docstring'''
__a = self.num_layers
try:
__a = self.bert(
__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , token_type_ids=__SCREAMING_SNAKE_CASE , position_ids=__SCREAMING_SNAKE_CASE , head_mask=__SCREAMING_SNAKE_CASE , inputs_embeds=__SCREAMING_SNAKE_CASE , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
__a = outputs[1]
__a = self.dropout(__SCREAMING_SNAKE_CASE)
__a = self.classifier(__SCREAMING_SNAKE_CASE)
__a = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__a = e.message
__a = e.exit_layer
__a = outputs[0]
if not self.training:
__a = entropy(__SCREAMING_SNAKE_CASE)
__a = []
__a = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__a = MSELoss()
__a = loss_fct(logits.view(-1) , labels.view(-1))
else:
__a = CrossEntropyLoss()
__a = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
__a = []
for highway_exit in outputs[-1]:
__a = highway_exit[0]
if not self.training:
highway_logits_all.append(__SCREAMING_SNAKE_CASE)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
__a = MSELoss()
__a = loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
__a = CrossEntropyLoss()
__a = loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(__SCREAMING_SNAKE_CASE)
if train_highway:
__a = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
__a = (loss,) + outputs
if not self.training:
__a = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__a = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 60 |
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
__snake_case :Union[str, Any] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__snake_case :str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
__snake_case :List[Any] = '''zero2'''
__snake_case :Optional[Any] = '''zero3'''
__snake_case :str = [ZEROa, ZEROa]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
    __a = parameterized.to_safe_name('''_'''.join(str(x) for x in param.args ) )
return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
__snake_case :List[Any] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _A ( __UpperCAmelCase ):
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
@parameterized.expand(__SCREAMING_SNAKE_CASE , name_func=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = models[model]
__a = self.run_trainer(
stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
self.do_checks(__SCREAMING_SNAKE_CASE)
return output_dir
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE)
__a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__a = self.get_launcher(__SCREAMING_SNAKE_CASE)
__a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env())
return output_dir
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False):
'''simple docstring'''
__a = min(2 , get_gpu_count()) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
| 60 | 1 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(_UpperCAmelCase , n - 1 , _UpperCAmelCase ) * a) % mod
else:
        __a = binary_exponentiation(_UpperCAmelCase , n // 2 , _UpperCAmelCase )
return (b * b) % mod
# a prime number
__snake_case :List[Any] = 701
__snake_case :Dict = 1_000_000_000
__snake_case :Optional[Any] = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
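# Hedged cross-check with the standard library: pow() performs modular
# exponentiation natively and, since Python 3.8, modular inverses directly.
# `binexp` is a readable restatement of the recursion above for this check.
def binexp(a: int, n: int, mod: int) -> int:
    if n == 0:
        return 1
    if n % 2 == 1:
        return (binexp(a, n - 1, mod) * a) % mod
    half = binexp(a, n // 2, mod)
    return (half * half) % mod

assert binexp(10, 699, 701) == pow(10, 699, 701)
assert pow(10, -1, 701) == pow(10, 699, 701)  # Fermat inverse, p = 701 prime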
| 60 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = False ):
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = f'Expected string as input, found {type(_UpperCAmelCase )}'
raise ValueError(_UpperCAmelCase )
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = f'Expected boolean as use_pascal parameter, found {type(_UpperCAmelCase )}'
raise ValueError(_UpperCAmelCase )
__a = input_str.split('''_''' )
__a = 0 if use_pascal else 1
__a = words[start_index:]
__a = [word[0].upper() + word[1:] for word in words_to_capitalize]
__a = '''''' if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
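# Hedged usage sketch with a readable restatement of the converter above,
# since the dump's renamed definition cannot be called directly.
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    capitalized = [word[0].upper() + word[1:] for word in words[start_index:]]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized])

assert snake_to_camel_case("some_random_string") == "someRandomString"
assert snake_to_camel_case("some_random_string", use_pascal=True) == "SomeRandomString"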
| 60 | 1 |
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def __snake_case ( _UpperCAmelCase ):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def __snake_case ( ):
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def __snake_case ( ):
__a = '''mock-s3-bucket'''
__a = f's3://{mock_bucket}'
__a = extract_path_from_uri(_UpperCAmelCase )
assert dataset_path.startswith('''s3://''' ) is False
__a = '''./local/path'''
__a = extract_path_from_uri(_UpperCAmelCase )
assert dataset_path == new_dataset_path
def __snake_case ( _UpperCAmelCase ):
__a = is_remote_filesystem(_UpperCAmelCase )
assert is_remote is True
__a = fsspec.filesystem('''file''' )
__a = is_remote_filesystem(_UpperCAmelCase )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , _UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
__a = input_paths[compression_fs_class.protocol]
if input_path is None:
__a = f'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCAmelCase )
__a = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCAmelCase )
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
__a = os.path.basename(_UpperCAmelCase )
__a = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(_UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as f, open(_UpperCAmelCase , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = {'''zip''': zip_jsonl_path, '''gzip''': jsonl_gz_path}
__a = compressed_file_paths[protocol]
__a = '''dataset.jsonl'''
__a = f'{protocol}://{member_file_path}::{compressed_file_path}'
__a , *__a = fsspec.get_fs_token_paths(_UpperCAmelCase )
assert fs.isfile(_UpperCAmelCase )
assert not fs.isfile('''non_existing_''' + member_file_path )
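# Hedged aside on the "::"-chained URL form exercised above: fsspec resolves
# each protocol layer left to right, so a member of an archive can be opened
# in one call. The file names below are made up for the demonstration.
import json
import tempfile
import zipfile

def _demo_chained_url():
    tmp = tempfile.mkdtemp()
    archive = os.path.join(tmp, "archive.zip")
    with zipfile.ZipFile(archive, "w") as zf:
        zf.writestr("dataset.jsonl", json.dumps({"a": 1}) + "\n")
    with fsspec.open(f"zip://dataset.jsonl::{archive}", "rt") as f:
        assert json.loads(f.read()) == {"a": 1}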
@pytest.mark.integration
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = hf_api.dataset_info(_UpperCAmelCase , token=_UpperCAmelCase )
__a = HfFileSystem(repo_info=_UpperCAmelCase , token=_UpperCAmelCase )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(_UpperCAmelCase ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def __snake_case ( ):
__a = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(_UpperCAmelCase , _UpperCAmelCase , clobber=_UpperCAmelCase )
with pytest.warns(_UpperCAmelCase ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(_UpperCAmelCase ) == 1
assert (
str(warning_info[0].message )
== f'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
| 60 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
__snake_case :List[str] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class _A :
UpperCamelCase__ : str
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[Union[str, int]] = None
UpperCamelCase__ : Optional[Union[str, int]] = None
UpperCamelCase__ : Optional[Union[str, int]] = None
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a , __a , __a = _str_to_version_tuple(self.version_str)
def __repr__( self : Tuple):
'''simple docstring'''
return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
return self.major, self.minor, self.patch
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
return Version(__SCREAMING_SNAKE_CASE)
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
return other
raise TypeError(F'{other} (type {type(__SCREAMING_SNAKE_CASE)}) cannot be compared to version.')
def __eq__( self : int , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
try:
__a = self._validate_operand(__SCREAMING_SNAKE_CASE)
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self : str , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = self._validate_operand(__SCREAMING_SNAKE_CASE)
return self.tuple < other.tuple
def __hash__( self : Optional[Any]):
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple))
@classmethod
def _lowerCamelCase ( cls : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = {f.name for f in dataclasses.fields(cls)}
return cls(**{k: v for k, v in dic.items() if k in field_names})
def _lowerCamelCase ( self : int):
'''simple docstring'''
return self.version_str
def __snake_case ( _UpperCAmelCase ):
__a = _VERSION_REG.match(_UpperCAmelCase )
if not res:
raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(_UpperCAmelCase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def __snake_case ( _UpperCAmelCase ):
return ".".join(str(_UpperCAmelCase ) for v in version_tuple )
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__snake_case :int = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Optional[Any] = ['''BartphoTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
__snake_case :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
__snake_case :int = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class _A ( tr.AbstractTransform ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "):
'''simple docstring'''
__a = sentence_delimiter
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return list(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = []
for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE):
chars.extend(self.process_string(__SCREAMING_SNAKE_CASE))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1:
chars.append(self.sentence_delimiter)
return chars
__snake_case :Any = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
__snake_case :Optional[int] = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
__snake_case :Optional[int] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__snake_case :Tuple = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
__snake_case :Tuple = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
 predictions: list of transcriptions to score.
 concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class _A ( datasets.Metric ):
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"]
__a = 0
__a = 0
for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
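# Hedged worked example of the formula in the description above: reference
# "abc" vs. hypothesis "axc" has one substitution (S=1, D=I=0, C=2), so
# N = S + D + C = 3 and CER = (S + D + I) / N = 1/3.
S, D, I, C = 1, 0, 0, 2
assert (S + D + I) / (S + D + C) == 1 / 3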
| 60 | 1 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__snake_case :Tuple = logging.get_logger(__name__)
__snake_case :str = TypeVar('''DatasetType''', Dataset, IterableDataset)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('''Unable to interleave an empty list of datasets.''' )
for i, dataset in enumerate(_UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(_UpperCAmelCase )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCAmelCase ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCAmelCase ).__name__}.' )
if i == 0:
__a , __a = (
(Dataset, IterableDataset) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , stopping_strategy=_UpperCAmelCase )
else:
return _interleave_iterable_datasets(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , stopping_strategy=_UpperCAmelCase )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = 0 , ):
if not dsets:
raise ValueError('''Unable to concatenate an empty list of datasets.''' )
for i, dataset in enumerate(_UpperCAmelCase ):
if not isinstance(_UpperCAmelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCAmelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'''is an empty dataset dictionary.''' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(_UpperCAmelCase )}\n'
                    f'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_UpperCAmelCase ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCAmelCase ).__name__}.' )
if i == 0:
__a , __a = (
(Dataset, IterableDataset) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCAmelCase , _UpperCAmelCase ):
raise ValueError(
                f'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , axis=_UpperCAmelCase )
else:
return _concatenate_iterable_datasets(_UpperCAmelCase , info=_UpperCAmelCase , split=_UpperCAmelCase , axis=_UpperCAmelCase )
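# Hedged usage sketch (toy data), assuming the public entry point
# `interleave_datasets` that the first function above implements: with no
# sampling probabilities the sources are alternated one example at a time.
from datasets import Dataset, interleave_datasets

_d1 = Dataset.from_dict({"x": [0, 1]})
_d2 = Dataset.from_dict({"x": [10, 11]})
_mixed = interleave_datasets([_d1, _d2])
assert _mixed["x"] == [0, 10, 1, 11]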
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case :Union[str, Any] = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :List[str] = ['''ViTFeatureExtractor''']
__snake_case :Optional[Any] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :str = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case :Tuple = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
__snake_case :Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 | 1 |
def __snake_case ( _UpperCAmelCase ):
__a = 0
while len(_UpperCAmelCase ) > 1:
__a = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
__a = files.index(min(_UpperCAmelCase ) )
temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
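# Hedged aside: the same minimum merge cost is usually computed with a
# min-heap, which avoids the linear index()/pop() scan in the loop above.
# `optimal_merge_pattern` is a readable restatement, not the function above.
import heapq

def optimal_merge_pattern(files: list) -> int:
    heapq.heapify(files)
    cost = 0
    while len(files) > 1:
        merged = heapq.heappop(files) + heapq.heappop(files)
        cost += merged
        heapq.heappush(files, merged)
    return cost

assert optimal_merge_pattern([2, 3, 4]) == 14  # merge 2+3=5, then 5+4=9; 5+9=14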
| 60 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : List[str] = GPTSwaTokenizer
UpperCamelCase__ : Dict = False
UpperCamelCase__ : int = True
UpperCamelCase__ : List[Any] = False
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = '''This is a test'''
__a = '''This is a test'''
return input_text, output_text
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = '''<s>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_000)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
__a = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842])
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
__a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
__a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
# fmt: off
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
__a = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
__a = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# Test that decode_fast returns the input text
for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
__a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
| 60 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def __snake_case ( _UpperCAmelCase ):
# getting number of pixels in the image
    __a , __a = img.shape[0], img.shape[1]  # height, width
    # converting each pixel's color to its negative
    for i in range(height ):
        for j in range(width ):
__a = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__snake_case :int = imread('''image_data/lena.jpg''', 1)
# convert to its negative
__snake_case :Any = convert_to_negative(img)
# show result image
imshow('''negative of original image''', img)
waitKey(0)
destroyAllWindows()
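# Hedged aside: with NumPy the per-pixel loop above collapses to a single
# vectorized expression on uint8 images.
import numpy as np

_sample = np.zeros((2, 2, 3), dtype=np.uint8)
assert ((255 - _sample) == 255).all()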
| 60 |
from __future__ import annotations
__snake_case :Optional[Any] = []
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for i in range(len(_UpperCAmelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(_UpperCAmelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , -1 , -1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(_UpperCAmelCase , -1 , -1 ) , range(_UpperCAmelCase , len(_UpperCAmelCase ) ) ):
if board[i][j] == 1:
return False
return True
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
if row >= len(_UpperCAmelCase ):
solution.append(_UpperCAmelCase )
printboard(_UpperCAmelCase )
print()
return True
for i in range(len(_UpperCAmelCase ) ):
if is_safe(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = 1
solve(_UpperCAmelCase , row + 1 )
__a = 0
return False
def __snake_case ( _UpperCAmelCase ):
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(_UpperCAmelCase ) ):
if board[i][j] == 1:
print('''Q''' , end=''' ''' )
else:
print('''.''' , end=''' ''' )
print()
# n=int(input("The no. of queens"))
__snake_case :Optional[Any] = 8
__snake_case :Tuple = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions is:''', len(solution))
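# Hedged compact restatement of the same backtracking, tracking used columns
# and diagonals in sets instead of rescanning the board; for n = 8 it finds
# the classical 92 solutions.
def count_queens(n, row=0, cols=frozenset(), diag1=frozenset(), diag2=frozenset()):
    if row == n:
        return 1
    total = 0
    for col in range(n):
        if col not in cols and (row - col) not in diag1 and (row + col) not in diag2:
            total += count_queens(n, row + 1, cols | {col}, diag1 | {row - col}, diag2 | {row + col})
    return total

assert count_queens(8) == 92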
| 60 | 1 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
while b:
__a , __a = b, a % b
return a
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return a if b == 0 else euclidean_gcd_recursive(_UpperCAmelCase , a % b )
def __snake_case ( ):
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
| 60 |
def __snake_case ( _UpperCAmelCase ):
__a = ''''''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def __snake_case ( _UpperCAmelCase ):
__a = [chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__a = remove_duplicates(key.upper() )
__a = len(_UpperCAmelCase )
# First fill cipher with key characters
__a = {alphabet[i]: char for i, char in enumerate(_UpperCAmelCase )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(_UpperCAmelCase ) , 26 ):
__a = alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__a = alphabet[i - offset]
__a = char
return cipher_alphabet
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return "".join(cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = {v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(_UpperCAmelCase , _UpperCAmelCase ) for ch in message.upper() )
def __snake_case ( ):
__a = input('''Enter message to encode or decode: ''' ).strip()
__a = input('''Enter keyword: ''' ).strip()
__a = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
try:
__a = {'''e''': encipher, '''d''': decipher}[option]
except KeyError:
raise KeyError('''invalid input option''' )
__a = create_cipher_map(_UpperCAmelCase )
print(func(_UpperCAmelCase , _UpperCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
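# Hedged round-trip sketch (keyword and message are made up); the helper
# names match those read inside main() above.
_cipher_map = create_cipher_map("Sunshine")
assert decipher(encipher("HELLO WORLD", _cipher_map), _cipher_map) == "HELLO WORLD"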
| 60 | 1 |
def __snake_case ( _UpperCAmelCase = 1000000 ):
__a = 1
__a = 1
__a = {1: 1}
for inputa in range(2 , _UpperCAmelCase ):
__a = 0
__a = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
__a = (3 * number) + 1
counter += 1
if inputa not in counters:
__a = counter
if counter > pre_counter:
__a = inputa
__a = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
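# Hedged worked example of the chain length being maximized: starting at 13
# the Collatz sequence is 13 40 20 10 5 16 8 4 2 1, i.e. 10 terms.
def _chain_length(n: int) -> int:
    count = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        count += 1
    return count

assert _chain_length(13) == 10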
| 60 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__snake_case :List[Any] = None
__snake_case :Dict = logging.get_logger(__name__)
__snake_case :Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case :Union[str, Any] = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__snake_case :Optional[Any] = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__snake_case :Optional[int] = '''▁'''
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : str = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : Dict = BarthezTokenizer
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = vocab_file
__a = False if not self.vocab_file else True
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(__SCREAMING_SNAKE_CASE):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__a = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE)
return (out_vocab_file,)
| 60 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : str = BlenderbotSmallTokenizer
UpperCamelCase__ : Optional[int] = False
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
super().setUp()
__a = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
__a = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE))))
__a = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
__a = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE) + '''\n''')
with open(self.merges_file , '''w''' , encoding='''utf-8''') as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE))
def _lowerCamelCase ( self : Union[str, Any] , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = '''adapt act apte'''
__a = '''adapt act apte'''
return input_text, output_text
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
__a = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
__a = '''adapt act apte'''
__a = ['''adapt''', '''act''', '''ap@@''', '''te''']
__a = tokenizer.tokenize(__SCREAMING_SNAKE_CASE)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__a = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
assert tok('''sam''').input_ids == [1_384]
__a = '''I am a small frog.'''
__a = tok([src_text] , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE)['''input_ids''']
__a = tok.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE)[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__a = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''')
__a = '''I am a small frog .'''
__a = '''.'''
__a = tok(__SCREAMING_SNAKE_CASE)['''input_ids''']
__a = tok(__SCREAMING_SNAKE_CASE)['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 60 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
__snake_case :Optional[int] = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
__snake_case :Optional[int] = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def __snake_case ( _UpperCAmelCase ):
__a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0]
@deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __snake_case ( _UpperCAmelCase ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
__a = _readaa(_UpperCAmelCase )
if magic != 2051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
__a = _readaa(_UpperCAmelCase )
__a = _readaa(_UpperCAmelCase )
__a = _readaa(_UpperCAmelCase )
__a = bytestream.read(rows * cols * num_images )
__a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta )
__a = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 )
return data
@deprecated(_UpperCAmelCase , '''Please use tf.one_hot on tensors.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = labels_dense.shape[0]
__a = numpy.arange(_UpperCAmelCase ) * num_classes
__a = numpy.zeros((num_labels, num_classes) )
__a = 1
return labels_one_hot
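# Hedged sanity check of the flat-index trick above: offsetting each row by
# num_classes and adding the label selects the same entries as numpy.eye.
_labels = numpy.array([0, 2, 1])
_one_hot = numpy.zeros((3, 3))
_one_hot.flat[numpy.arange(3) * 3 + _labels] = 1
assert (_one_hot == numpy.eye(3)[_labels]).all()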
@deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=10 ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
__a = _readaa(_UpperCAmelCase )
if magic != 2049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
__a = _readaa(_UpperCAmelCase )
__a = bytestream.read(_UpperCAmelCase )
__a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase )
return labels
class _A :
@deprecated(
__SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Any=dtypes.floataa , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=None , ):
'''simple docstring'''
__a , __a = random_seed.get_seed(__SCREAMING_SNAKE_CASE)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda)
__a = dtypes.as_dtype(__SCREAMING_SNAKE_CASE).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype)
if fake_data:
__a = 10_000
__a = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'images.shape: {images.shape} labels.shape: {labels.shape}'
__a = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__a = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2])
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__a = images.astype(numpy.floataa)
__a = numpy.multiply(__SCREAMING_SNAKE_CASE , 1.0 / 2_55.0)
__a = images
__a = labels
__a = 0
__a = 0
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return self._images
@property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return self._labels
@property
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
return self._num_examples
@property
def _lowerCamelCase ( self : str):
'''simple docstring'''
return self._epochs_completed
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True):
'''simple docstring'''
if fake_data:
__a = [1] * 784
__a = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__SCREAMING_SNAKE_CASE)],
[fake_label for _ in range(__SCREAMING_SNAKE_CASE)],
)
__a = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__a = numpy.arange(self._num_examples)
numpy.random.shuffle(__SCREAMING_SNAKE_CASE)
__a = self.images[perma]
__a = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__a = self._num_examples - start
__a = self._images[start : self._num_examples]
__a = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__a = numpy.arange(self._num_examples)
numpy.random.shuffle(__SCREAMING_SNAKE_CASE)
__a = self.images[perm]
__a = self.labels[perm]
# Start next epoch
__a = 0
__a = batch_size - rest_num_examples
__a = self._index_in_epoch
__a = self._images[start:end]
__a = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0),
)
else:
self._index_in_epoch += batch_size
__a = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(_UpperCAmelCase , '''Please write your own downloading logic.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if not gfile.Exists(_UpperCAmelCase ):
gfile.MakeDirs(_UpperCAmelCase )
__a = os.path.join(_UpperCAmelCase , _UpperCAmelCase )
if not gfile.Exists(_UpperCAmelCase ):
urllib.request.urlretrieve(_UpperCAmelCase , _UpperCAmelCase ) # noqa: S310
with gfile.GFile(_UpperCAmelCase ) as f:
__a = f.size()
print('''Successfully downloaded''' , _UpperCAmelCase , _UpperCAmelCase , '''bytes.''' )
return filepath
@deprecated(
_UpperCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=dtypes.floataa , _UpperCAmelCase=True , _UpperCAmelCase=5000 , _UpperCAmelCase=None , _UpperCAmelCase=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase )
__a = fake()
__a = fake()
__a = fake()
return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
if not source_url: # empty string check
__a = DEFAULT_SOURCE_URL
__a = '''train-images-idx3-ubyte.gz'''
__a = '''train-labels-idx1-ubyte.gz'''
__a = '''t10k-images-idx3-ubyte.gz'''
__a = '''t10k-labels-idx1-ubyte.gz'''
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_images(_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_images(_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase )
if not 0 <= validation_size <= len(_UpperCAmelCase ):
__a = (
'''Validation size should be between 0 and '''
f'{len(_UpperCAmelCase )}. Received: {validation_size}.'
)
raise ValueError(_UpperCAmelCase )
__a = train_images[:validation_size]
__a = train_labels[:validation_size]
__a = train_images[validation_size:]
__a = train_labels[validation_size:]
__a = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
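# A minimal usage sketch (hedged: `read_data_sets` is the entry point's name in the
# original TensorFlow module, and "./mnist_data" is an illustrative path):
# datasets = read_data_sets("./mnist_data", one_hot=True, validation_size=5000)
# images, labels = datasets.train.next_batch(100) # one shuffled mini-batch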
| 60 | 1 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case :Optional[int] = logging.get_logger(__name__)
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm1.weight', f'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm1.bias', f'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.weight', f'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.attn.proj.bias', f'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.norm2.weight', f'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.norm2.bias', f'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.weight', f'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc1.bias', f'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(f'encoder.deit.blocks.{i}.mlp.fc2.weight', f'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((f'encoder.deit.blocks.{i}.mlp.fc2.bias', f'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__a = state_dict.pop(f'encoder.deit.blocks.{i}.attn.qkv.weight' )
__a = in_proj_weight[
: encoder_config.hidden_size, :
]
__a = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__a = in_proj_weight[
-encoder_config.hidden_size :, :
]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = dct.pop(_UpperCAmelCase )
__a = val
def __snake_case ( _UpperCAmelCase ):
if "handwritten" in checkpoint_url:
__a = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__a = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
__a = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = ViTConfig(image_size=384 , qkv_bias=_UpperCAmelCase )
__a = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__a = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
__a = 1024
__a = 4096
__a = 24
__a = 16
__a = 1024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__a = False
__a = '''relu'''
__a = 1024
__a = True
__a = False
__a = False
# load HuggingFace model
__a = ViTModel(_UpperCAmelCase , add_pooling_layer=_UpperCAmelCase )
__a = TrOCRForCausalLM(_UpperCAmelCase )
__a = VisionEncoderDecoderModel(encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
__a = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='''cpu''' , check_hash=_UpperCAmelCase )['''model''']
__a = create_rename_keys(_UpperCAmelCase , _UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__a = state_dict.pop(_UpperCAmelCase )
if key.startswith('''decoder''' ) and "output_projection" not in key:
__a = val
else:
__a = val
# load state dict
model.load_state_dict(_UpperCAmelCase )
# Check outputs on an image
__a = ViTImageProcessor(size=encoder_config.image_size )
__a = RobertaTokenizer.from_pretrained('''roberta-large''' )
__a = TrOCRProcessor(_UpperCAmelCase , _UpperCAmelCase )
__a = processor(images=prepare_img(_UpperCAmelCase ) , return_tensors='''pt''' ).pixel_values
# verify logits
__a = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__a = model(pixel_values=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
__a = outputs.logits
__a = torch.Size([1, 1, 50265] )
if "trocr-base-handwritten" in checkpoint_url:
__a = torch.tensor(
[-1.45_02, -4.66_83, -0.53_47, -2.92_91, 9.14_35, -3.05_71, 8.97_64, 1.75_60, 8.73_58, -1.53_11] )
elif "trocr-large-handwritten" in checkpoint_url:
__a = torch.tensor(
[-2.64_37, -1.31_29, -2.25_96, -5.34_55, 6.35_39, 1.76_04, 5.49_91, 1.47_02, 5.61_13, 2.01_70] )
elif "trocr-base-printed" in checkpoint_url:
__a = torch.tensor(
[-5.68_16, -5.83_88, 1.13_98, -6.90_34, 6.85_05, -2.43_93, 1.22_84, -1.02_32, -1.96_61, -3.92_10] )
elif "trocr-large-printed" in checkpoint_url:
__a = torch.tensor(
[-6.01_62, -7.09_59, 4.41_55, -5.10_63, 7.04_68, -3.16_31, 2.64_66, -0.30_81, -0.81_06, -1.75_35] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _UpperCAmelCase , atol=1E-3 ), "First elements of logits not as expected"
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__snake_case :int = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
__snake_case :Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
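# Example invocation (the script filename and output path are illustrative; the
# checkpoint URL matches the default above):
# python convert_trocr_unilm_to_pytorch.py \
# --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
# --pytorch_dump_folder_path ./trocr-base-handwritten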
| 60 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ):
'''simple docstring'''
__a = size if size is not None else {'''height''': 20, '''width''': 20}
__a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_center_crop
__a = crop_size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_reduce_labels
def _lowerCamelCase ( self : str):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __snake_case ( ):
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(dataset[0]['''file'''] )
__a = Image.open(dataset[1]['''file'''] )
return image, map
def __snake_case ( ):
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(ds[0]['''file'''] )
__a = Image.open(ds[1]['''file'''] )
__a = Image.open(ds[2]['''file'''] )
__a = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = BeitImageProcessingTester(self)
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std'''))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20})
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
__a = []
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
__a = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test not batched input (PIL images)
__a , __a = prepare_semantic_single_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched input (PIL images)
__a , __a = prepare_semantic_batch_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__a , __a = prepare_semantic_single_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 150)
__a = True
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
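# A standalone sketch of what the last test above is checking (hedged: this mirrors
# the ADE20k reduce-labels convention, where background id 0 becomes the ignore
# index 255 and every other class id shifts down by one):
# import numpy as np
# label = np.array([[0, 1], [150, 2]], dtype=np.int64)
# reduced = label - 1
# reduced[label == 0] = 255
# # reduced == [[255, 0], [149, 1]], i.e. values now span 0..149 plus 255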
| 60 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
__snake_case :Any = datasets.utils.logging.get_logger(__name__)
class _A ( folder_based_builder.FolderBasedBuilderConfig ):
UpperCamelCase__ : bool = None
UpperCamelCase__ : bool = None
class _A ( folder_based_builder.FolderBasedBuilder ):
UpperCamelCase__ : Optional[int] = datasets.Audio()
UpperCamelCase__ : int = '''audio'''
UpperCamelCase__ : Optional[int] = AudioFolderConfig
UpperCamelCase__ : List[str] # definition at the bottom of the script
UpperCamelCase__ : str = AudioClassification(audio_column='''audio''' ,label_column='''label''' )
__snake_case :Optional[int] = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
__snake_case :Dict = AUDIO_EXTENSIONS
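# A minimal usage sketch of the builder this module defines (the data directory
# is an illustrative assumption):
# from datasets import load_dataset
# ds = load_dataset("audiofolder", data_dir="./my_audio_clips")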
| 60 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : int):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self._create_example_records()
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
for i, r in enumerate(__SCREAMING_SNAKE_CASE):
self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i])
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self._create_example_records()
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
__a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
def _lowerCamelCase ( self : int): # checks what happens with missing columns
'''simple docstring'''
__a = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
self.assertDictEqual(dset[0] , {'''col_1''': 1})
self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
def _lowerCamelCase ( self : Optional[Any]): # checks if the type can be inferred from the second record
'''simple docstring'''
__a = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = Dataset.from_list([])
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0)
self.assertListEqual(dset.column_names , [])
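# A minimal standalone sketch mirroring the behavior tested above:
# from datasets import Dataset
# ds = Dataset.from_list([{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}])
# assert ds.column_names == ["col_1", "col_2"] and len(ds) == 2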
| 60 | 1 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Union[str, Any] = ['''image_processor''', '''tokenizer''']
UpperCamelCase__ : List[str] = '''FlavaImageProcessor'''
UpperCamelCase__ : str = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
__a = kwargs.pop('''feature_extractor''')
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = self.image_processor
def __call__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[ImageInput] = None , __SCREAMING_SNAKE_CASE : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = False , __SCREAMING_SNAKE_CASE : Union[bool, str, TruncationStrategy] = False , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : int = 0 , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : Optional[bool] = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''')
if text is not None:
__a = self.tokenizer(
text=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , stride=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , return_special_tokens_mask=__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , return_length=__SCREAMING_SNAKE_CASE , verbose=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
if images is not None:
__a = self.image_processor(
__SCREAMING_SNAKE_CASE , return_image_mask=__SCREAMING_SNAKE_CASE , return_codebook_pixels=__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
if text is not None and images is not None:
encoding.update(__SCREAMING_SNAKE_CASE)
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE) , tensor_type=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any] , *__SCREAMING_SNAKE_CASE : List[str] , **__SCREAMING_SNAKE_CASE : Dict):
'''simple docstring'''
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Union[str, Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
@property
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.tokenizer.model_input_names
__a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , __SCREAMING_SNAKE_CASE , )
return self.image_processor
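# A minimal usage sketch (hedged: "facebook/flava-full" is the public checkpoint
# this processor is typically paired with; the text and image inputs are illustrative):
# from transformers import FlavaProcessor
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")
# inputs = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")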
| 60 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( _UpperCAmelCase ):
__a = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __snake_case ( _UpperCAmelCase ):
__a = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def __snake_case ( ):
__a = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = __a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__a = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__a = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__a = [2, 2, 20]
__a = [3, 12, 16]
__a = [192, 768, 1024]
__a = CvtForImageClassification(_UpperCAmelCase )
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__a = image_size
__a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) )
__a = OrderedDict()
__a = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__a = list_of_state_dict + cls_token(_UpperCAmelCase )
__a = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
__a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase )
__a = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
__a = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__snake_case :str = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Input Image Size''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__snake_case :Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
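# Example invocation (the script filename is illustrative; the checkpoint path
# matches the default above):
# python convert_cvt_original_pytorch_checkpoint_to_pytorch.py \
# --cvt_model cvt-w24 --image_size 384 \
# --cvt_file_name cvtmodels/CvT-w24-384x384-IN-22k.pth \
# --pytorch_dump_folder_path ./cvt-w24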
| 60 | 1 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__snake_case :List[str] = HfApi()
__snake_case :str = {}
# fmt: off
__snake_case :Optional[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__snake_case :Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__snake_case :str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__snake_case :List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__snake_case :Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__snake_case :List[str] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__snake_case :Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__snake_case :Tuple = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__snake_case :List[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__snake_case :Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__snake_case :List[str] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__snake_case :Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__snake_case :List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__snake_case :Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
__snake_case :List[Any] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
__snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__snake_case :str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__snake_case :List[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__snake_case :Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 60 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __snake_case ( _UpperCAmelCase ):
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__a = np.array(_UpperCAmelCase ).astype(np.floataa ) / 2_55.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(_UpperCAmelCase )
return 2.0 * image - 1.0
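# Note: width and height are floored to multiples of 32 because the UNet repeatedly
# downsamples the input, and the final line rescales pixel values from [0, 1] to
# the [-1, 1] range the diffusion model expects.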
class _A ( __UpperCAmelCase ):
def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNetaDModel , __SCREAMING_SNAKE_CASE : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE)
@torch.no_grad()
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = 1
elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
__a = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}')
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = preprocess(__SCREAMING_SNAKE_CASE)
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters()).dtype
__a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE)
__a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(__SCREAMING_SNAKE_CASE):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1)
__a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# predict the noise residual
__a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample
__a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0)
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
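# A minimal usage sketch (hedged: "CompVis/ldm-super-resolution-4x-openimages" is
# the public checkpoint usually loaded with this pipeline; the input image is illustrative):
# from diffusers import LDMSuperResolutionPipeline
# pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
# upscaled = pipe(low_res_image, num_inference_steps=100, eta=1.0).images[0]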
| 60 | 1 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a = len(set_a.intersection(_UpperCAmelCase ) )
if alternative_union:
__a = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
else:
__a = len(set_a.union(_UpperCAmelCase ) )
return intersection / union
if isinstance(_UpperCAmelCase , (list, tuple) ) and isinstance(_UpperCAmelCase , (list, tuple) ):
__a = [element for element in set_a if element in set_b]
if alternative_union:
__a = len(_UpperCAmelCase ) + len(_UpperCAmelCase )
return len(_UpperCAmelCase ) / union
else:
__a = set_a + [element for element in set_b if element not in set_a]
return len(_UpperCAmelCase ) / len(_UpperCAmelCase )
return None
if __name__ == "__main__":
__snake_case :Any = {'''a''', '''b''', '''c''', '''d''', '''e'''}
__snake_case :Optional[int] = {'''c''', '''d''', '''e''', '''f''', '''h''', '''i'''}
print(jaccard_similarity(set_a, set_b))
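# Worked example: for the sets above the intersection is {'c', 'd', 'e'} (3 elements)
# and the union has 8 elements, so the printed similarity is 3 / 8 = 0.375.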
| 60 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__snake_case :Any = TypeVar('''KT''')
__snake_case :List[str] = TypeVar('''VT''')
class _A ( Generic[KT, VT] ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : KT | str = "root" , __SCREAMING_SNAKE_CASE : VT | None = None):
'''simple docstring'''
__a = key
__a = value
__a = []
def __repr__( self : Dict):
'''simple docstring'''
return F'Node({self.key}: {self.value})'
@property
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
return len(self.forward)
class _A ( Generic[KT, VT] ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : float = 0.5 , __SCREAMING_SNAKE_CASE : int = 16):
'''simple docstring'''
__a = Node[KT, VT]()
__a = 0
__a = p
__a = max_level
def __str__( self : Union[str, Any]):
'''simple docstring'''
__a = list(self)
if len(__SCREAMING_SNAKE_CASE) == 0:
return F'SkipList(level={self.level})'
__a = max((len(str(__SCREAMING_SNAKE_CASE)) for item in items) , default=4)
__a = max(__SCREAMING_SNAKE_CASE , 4) + 4
__a = self.head
__a = []
__a = node.forward.copy()
lines.append(F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + '''* ''' * len(__SCREAMING_SNAKE_CASE))
lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE))
while len(node.forward) != 0:
__a = node.forward[0]
lines.append(
F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''')
+ ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE))
__a = node.forward
lines.append('''None'''.ljust(__SCREAMING_SNAKE_CASE) + '''* ''' * len(__SCREAMING_SNAKE_CASE))
return F'SkipList(level={self.level})\n' + "\n".join(__SCREAMING_SNAKE_CASE)
def __iter__( self : int):
'''simple docstring'''
__a = self.head
while len(node.forward) != 0:
yield node.forward[0].key
__a = node.forward[0]
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = 1
while random() < self.p and level < self.max_level:
level += 1
return level
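# Note: with p = 0.5 a node reaches level k with probability 0.5 ** (k - 1), so the
# expected number of forward pointers per node is 1 / (1 - p) = 2; this geometric
# level distribution is what gives the skip list its O(log n) expected search cost.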
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = []
__a = self.head
for i in reversed(range(self.level)):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__a = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__SCREAMING_SNAKE_CASE)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : KT):
'''simple docstring'''
__a , __a = self._locate_node(__SCREAMING_SNAKE_CASE)
if node is not None:
for i, update_node in enumerate(__SCREAMING_SNAKE_CASE):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__a = node.forward[i]
else:
__a = update_node.forward[:i]
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : KT , __SCREAMING_SNAKE_CASE : VT):
'''simple docstring'''
__a , __a = self._locate_node(__SCREAMING_SNAKE_CASE)
if node is not None:
__a = value
else:
__a = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __SCREAMING_SNAKE_CASE):
update_vector.append(self.head)
__a = level
__a = Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for i, update_node in enumerate(update_vector[:level]):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i])
if update_node.level < i + 1:
update_node.forward.append(__SCREAMING_SNAKE_CASE)
else:
__a = new_node
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : VT):
'''simple docstring'''
__a , __a = self._locate_node(__SCREAMING_SNAKE_CASE)
if node is not None:
return node.value
return None
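# Quick usage sketch (illustrative only; it mirrors the tests below):
#
#     sl = SkipList()
#     sl.insert("a", 1)
#     sl.insert("b", 2)
#     assert sl.find("a") == 1
#     sl.delete("a")
#     assert sl.find("a") is None
#
# A level-i pointer exists with probability p**i, so a search descends through
# about log(n) levels in expectation.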
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")
    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_found_by_find_method():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()
    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():  # name assumed; the original identifier was mangled
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_found_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    """
    >>> pytests()
    """
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 60 | 1 |
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 60 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if the sink t is reachable from the source s in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph, source, sink):
    """Return the edges saturated by the maximum flow, i.e. the edges crossing
    the minimum cut."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).

    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow

        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
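    # Note: mincut() saturates edges of `graph` in place. A sketch (not in the
    # original script) for keeping the input capacities intact:
    #     import copy
    #     print(mincut(copy.deepcopy(test_graph), source=0, sink=5))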
| 60 | 1 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
# NOTE: the helper names below are reconstructed; the original identifiers were
# mangled in this dump.
def freeze_module(module):
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_pil(img):
    fig = plt.imshow(img)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
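# Hypothetical usage of the helpers above:
#
#     device = get_device()
#     freeze_module(model.encoder)  # `model` is whatever torch module is in play
#     print(f"[{get_timestamp()}] running on {device}")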
| 60 |
from __future__ import annotations
def print_distance(distance: list[float], src):
    print(f"Vertex\tShortest Distance from vertex {src}")
    for i, d in enumerate(distance):
        print(f"{i}\t\t{d}")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Relax all edges vertex_count - 1 times, then check for negative cycles."""
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")
    return distance
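# Minimal self-check sketch (hypothetical data, not part of the original file):
# edges 0 -> 1 (w=2) and 1 -> 2 (w=3) give distances [0.0, 2.0, 5.0] from source 0.
#
#     graph = [{"src": 0, "dst": 1, "weight": 2}, {"src": 1, "dst": 2, "weight": 3}]
#     assert bellman_ford(graph, vertex_count=3, edge_count=2, src=0) == [0.0, 2.0, 5.0]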
if __name__ == "__main__":
import doctest
doctest.testmod()
    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 60 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Record only "leaf" operations: modules with no submodules, plus conv
        # and batch-norm layers.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # Check the len of the state_dict keys to see if we have learnable params.
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer the weights of self.src to self.dest by running one forward
        pass with x; both modules must trace to the same number of operations."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transferred from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

    print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'''resnet18''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet26''': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet34''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='''basic''' ),
'''resnet50''': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet101''': ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
'''resnet152''': ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='''bottleneck''' ),
}
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
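    # Passing --model_name (e.g. resnet50) converts a single checkpoint; leaving
    # it unset converts every architecture listed in names_to_config above.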
| 60 |
import os
import sys
import unittest
__snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''')
__snake_case :Any = '''
{0} = None
'''
__snake_case :Dict = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
__snake_case :str = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(__SCREAMING_SNAKE_CASE)
__a = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''')
__a = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''')
__a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''')
__a = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''')
__a = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''')
__a = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
__SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
__a = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__a = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
| 60 | 1 |
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
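    # Expected output for the array above (the classic CLRS example):
    #   No. of Operation required: 15125
    #   ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) )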
| 60 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
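# Sketch of how a formatter like this is normally reached through the public API
# (assuming the `datasets` library; the dataset name is a placeholder):
#
#     ds = load_dataset("user/dataset", split="train")
#     ds = ds.with_format("jax", device=str(jax.devices()[0]))
#     ds[0]  # rows now come back as jax.Array values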
| 60 | 1 |
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 60 |
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
    )
    parser.add_argument(
        "--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
    )
    parser.add_argument(
        "--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
    )
    parser.add_argument("--vocab_size", default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, "wb") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
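    # Downstream these raw counts are typically turned into a smoothed masking
    # distribution; a sketch (the exact exponent lives in the training script):
    #     probs = np.maximum(counts, 1) ** -0.7
    #     probs = probs / probs.sum()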
| 60 | 1 |
from collections.abc import Iterable
from typing import Any
class _A :
def __init__( self : int , __SCREAMING_SNAKE_CASE : int | None = None):
'''simple docstring'''
__a = value
__a = None # Added in order to delete a node easier
__a = None
__a = None
def __repr__( self : str):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value)
return pformat({F'{self.value}': (self.left, self.right)} , indent=1)
class _A :
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Node | None = None):
'''simple docstring'''
__a = root
def __str__( self : Union[str, Any]):
'''simple docstring'''
return str(self.root)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Node , __SCREAMING_SNAKE_CASE : Node | None):
'''simple docstring'''
if new_children is not None: # reset its kids
__a = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__SCREAMING_SNAKE_CASE): # If it is the right children
__a = new_children
else:
__a = new_children
else:
__a = new_children
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Node):
'''simple docstring'''
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return self.root is None
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = Node(__SCREAMING_SNAKE_CASE) # create a new Node
if self.empty(): # if Tree is empty
__a = new_node # set its root
else: # Tree is not empty
__a = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
__a = new_node # We insert the new node in a leaf
break
else:
__a = parent_node.left
else:
if parent_node.right is None:
__a = new_node
break
else:
__a = parent_node.right
__a = parent_node
def _lowerCamelCase ( self : List[str] , *__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
for value in values:
self.__insert(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
if self.empty():
raise IndexError('''Warning: Tree is empty! please use another.''')
else:
__a = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
__a = node.left if value < node.value else node.right
return node
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Node | None = None):
'''simple docstring'''
if node is None:
if self.root is None:
return None
__a = self.root
if not self.empty():
while node.right is not None:
__a = node.right
return node
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Node | None = None):
'''simple docstring'''
if node is None:
__a = self.root
if self.root is None:
return None
if not self.empty():
__a = self.root
while node.left is not None:
__a = node.left
return node
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = self.search(__SCREAMING_SNAKE_CASE) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
elif node.left is None: # Has only right children
self.__reassign_nodes(__SCREAMING_SNAKE_CASE , node.right)
elif node.right is None: # Has only left children
self.__reassign_nodes(__SCREAMING_SNAKE_CASE , node.left)
else:
__a = self.get_max(
node.left) # Gets the max value of the left branch
self.remove(tmp_node.value) # type: ignore
__a = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Node | None):
'''simple docstring'''
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left)
yield from self.preorder_traverse(node.right)
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Dict=None):
'''simple docstring'''
if traversal_function is None:
return self.preorder_traverse(self.root)
else:
return traversal_function(self.root)
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : list , __SCREAMING_SNAKE_CASE : Node | None):
'''simple docstring'''
if node:
self.inorder(__SCREAMING_SNAKE_CASE , node.left)
arr.append(node.value)
self.inorder(__SCREAMING_SNAKE_CASE , node.right)
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Node):
'''simple docstring'''
__a = []
self.inorder(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) # append all values to list using inorder traversal
return arr[k - 1]
def __snake_case ( _UpperCAmelCase ):
__a = []
if curr_node is not None:
__a = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def __snake_case ( ):
__a = (8, 3, 6, 1, 10, 14, 13, 4, 7)
__a = BinarySearchTree()
for i in testlist:
t.insert(_UpperCAmelCase )
# Prints all the elements of the list in order traversal
print(_UpperCAmelCase )
if t.search(6 ) is not None:
print('''The value 6 exists''' )
else:
print('''The value 6 doesn\'t exist''' )
if t.search(-1 ) is not None:
print('''The value -1 exists''' )
else:
print('''The value -1 doesn\'t exist''' )
if not t.empty():
print('''Max Value: ''' , t.get_max().value ) # type: ignore
print('''Min Value: ''' , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(_UpperCAmelCase )
print(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
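    # Note: this tree does no rebalancing, so insert/search/remove cost O(h);
    # h can degrade to n for sorted input, which is why testlist is shuffled.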
| 60 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
__snake_case :List[str] = HfApi()
__snake_case :str = {}
# fmt: off
__snake_case :Optional[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
__snake_case :Union[str, Any] = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
__snake_case :str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
__snake_case :List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
__snake_case :Any = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
__snake_case :List[str] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
__snake_case :Optional[int] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
__snake_case :Tuple = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
__snake_case :List[Any] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
__snake_case :Optional[Any] = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
__snake_case :Optional[Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
__snake_case :List[str] = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
__snake_case :Any = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
__snake_case :List[str] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
__snake_case :Union[str, Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
__snake_case :List[Any] = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
__snake_case :List[str] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(f'Started running {mod.modelId}!!!')
if mod.modelId.startswith('''CompVis'''):
__snake_case :Optional[int] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
__snake_case :str = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
__snake_case :List[Any] = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
__snake_case :List[Any] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
__snake_case :Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(f'{mod.modelId} has passed successfully!!!')
| 60 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__snake_case :int = logging.getLogger(__name__)
@dataclass(frozen=__UpperCAmelCase )
class _A :
UpperCamelCase__ : str
UpperCamelCase__ : str
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[str] = None
@dataclass(frozen=__UpperCAmelCase )
class _A :
UpperCamelCase__ : List[int]
UpperCamelCase__ : Optional[List[int]] = None
UpperCamelCase__ : Optional[List[int]] = None
UpperCamelCase__ : Optional[Union[int, float]] = None
UpperCamelCase__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : List[InputFeatures]
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] = None , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : bool = False , ):
'''simple docstring'''
__a = hans_processors[task]()
__a = os.path.join(
__SCREAMING_SNAKE_CASE , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE , ) , )
__a = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__a , __a = label_list[2], label_list[1]
__a = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__a = cached_features_file + '''.lock'''
with FileLock(__SCREAMING_SNAKE_CASE):
if os.path.exists(__SCREAMING_SNAKE_CASE) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}')
__a = torch.load(__SCREAMING_SNAKE_CASE)
else:
logger.info(F'Creating features from dataset file at {data_dir}')
__a = (
processor.get_dev_examples(__SCREAMING_SNAKE_CASE) if evaluate else processor.get_train_examples(__SCREAMING_SNAKE_CASE)
)
logger.info('''Training examples: %s''' , len(__SCREAMING_SNAKE_CASE))
__a = hans_convert_examples_to_features(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
logger.info('''Saving features into cached file %s''' , __SCREAMING_SNAKE_CASE)
torch.save(self.features , __SCREAMING_SNAKE_CASE)
def __len__( self : Tuple):
'''simple docstring'''
return len(self.features)
def __getitem__( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return self.features[i]
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return self.label_list
if is_tf_available():
import tensorflow as tf
class _A :
UpperCamelCase__ : List[InputFeatures]
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : PreTrainedTokenizer , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[int] = 128 , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : bool = False , ):
'''simple docstring'''
__a = hans_processors[task]()
__a = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__a , __a = label_list[2], label_list[1]
__a = label_list
__a = processor.get_dev_examples(__SCREAMING_SNAKE_CASE) if evaluate else processor.get_train_examples(__SCREAMING_SNAKE_CASE)
__a = hans_convert_examples_to_features(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features) , desc='''convert examples to features'''):
if ex_index % 10_000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(__SCREAMING_SNAKE_CASE)))
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__a = tf.data.Dataset.from_generator(
__SCREAMING_SNAKE_CASE , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([]),
'''input_ids''': tf.TensorShape([None, None]),
'''attention_mask''': tf.TensorShape([None, None]),
'''token_type_ids''': tf.TensorShape([None, None]),
},
tf.TensorShape([]),
) , )
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
return self.dataset
def __len__( self : Dict):
'''simple docstring'''
return len(self.features)
def __getitem__( self : str , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
return self.features[i]
def _lowerCamelCase ( self : int):
'''simple docstring'''
return self.label_list
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__SCREAMING_SNAKE_CASE , '''heuristics_train_set.txt''')) , '''train''')
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
return self._create_examples(self._read_tsv(os.path.join(__SCREAMING_SNAKE_CASE , '''heuristics_evaluation_set.txt''')) , '''dev''')
def _lowerCamelCase ( self : int):
'''simple docstring'''
return ["contradiction", "entailment", "neutral"]
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = []
for i, line in enumerate(__SCREAMING_SNAKE_CASE):
if i == 0:
continue
__a = '''%s-%s''' % (set_type, line[0])
__a = line[5]
__a = line[6]
__a = line[7][2:] if line[7].startswith('''ex''') else line[7]
__a = line[0]
examples.append(InputExample(guid=__SCREAMING_SNAKE_CASE , text_a=__SCREAMING_SNAKE_CASE , text_b=__SCREAMING_SNAKE_CASE , label=__SCREAMING_SNAKE_CASE , pairID=__SCREAMING_SNAKE_CASE))
return examples
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = {label: i for i, label in enumerate(_UpperCAmelCase )}
__a = []
for ex_index, example in tqdm.tqdm(enumerate(_UpperCAmelCase ) , desc='''convert examples to features''' ):
if ex_index % 10000 == 0:
logger.info('''Writing example %d''' % (ex_index) )
__a = tokenizer(
example.text_a , example.text_b , add_special_tokens=_UpperCAmelCase , max_length=_UpperCAmelCase , padding='''max_length''' , truncation=_UpperCAmelCase , return_overflowing_tokens=_UpperCAmelCase , )
__a = label_map[example.label] if example.label in label_map else 0
__a = int(example.pairID )
features.append(InputFeatures(**_UpperCAmelCase , label=_UpperCAmelCase , pairID=_UpperCAmelCase ) )
for i, example in enumerate(examples[:5] ):
logger.info('''*** Example ***''' )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
__snake_case :Union[str, Any] = {
'''hans''': 3,
}
__snake_case :List[Any] = {
'''hans''': HansProcessor,
}
| 60 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_32(i: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message: bytes) -> bytes:  # name assumed; the original identifier was mangled
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
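    # Sanity check against a well-known reference digest:
    #     assert md5_me(b"hello world") == b"5eb63bbbe01eeed093cb22bb8f5acdc3"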
| 60 | 1 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _A ( unittest.TestCase ):
UpperCamelCase__ : Any = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
UpperCamelCase__ : Optional[int] = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
__a = AudioClassificationPipeline(model=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE)
# test with a raw waveform
__a = np.zeros((34_000,))
__a = np.zeros((14_000,))
return audio_classifier, [audioa, audio]
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
__a , __a = examples
__a = audio_classifier(__SCREAMING_SNAKE_CASE)
# by default a model is initialized with num_labels=2
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE), '''label''': ANY(__SCREAMING_SNAKE_CASE)},
{'''score''': ANY(__SCREAMING_SNAKE_CASE), '''label''': ANY(__SCREAMING_SNAKE_CASE)},
] , )
__a = audio_classifier(__SCREAMING_SNAKE_CASE , top_k=1)
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE), '''label''': ANY(__SCREAMING_SNAKE_CASE)},
] , )
self.run_torchaudio(__SCREAMING_SNAKE_CASE)
@require_torchaudio
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
import datasets
# test with a local file
__a = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''')
__a = dataset[0]['''audio''']['''array''']
__a = audio_classifier(__SCREAMING_SNAKE_CASE)
self.assertEqual(
__SCREAMING_SNAKE_CASE , [
{'''score''': ANY(__SCREAMING_SNAKE_CASE), '''label''': ANY(__SCREAMING_SNAKE_CASE)},
{'''score''': ANY(__SCREAMING_SNAKE_CASE), '''label''': ANY(__SCREAMING_SNAKE_CASE)},
] , )
@require_torch
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
        model = '''anton-l/wav2vec2-random-tiny-classifier'''
        audio_classifier = pipeline('''audio-classification''' , model=model)
        audio = np.ones((8_000,))
        output = audio_classifier(audio , top_k=4)
        EXPECTED_OUTPUT = [
            {'''score''': 0.08_42, '''label''': '''no'''},
            {'''score''': 0.08_38, '''label''': '''up'''},
            {'''score''': 0.08_37, '''label''': '''go'''},
            {'''score''': 0.08_34, '''label''': '''right'''},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'''score''': 0.08_45, '''label''': '''stop'''},
            {'''score''': 0.08_44, '''label''': '''on'''},
            {'''score''': 0.08_41, '''label''': '''right'''},
            {'''score''': 0.08_34, '''label''': '''left'''},
        ]
        self.assertIn(nested_simplify(output , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {'''array''': np.ones((8_000,)), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict , top_k=4)
        self.assertIn(nested_simplify(output , decimals=4) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
@require_torch
@slow
def _lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
import datasets
        model = '''superb/wav2vec2-base-superb-ks'''
        audio_classifier = pipeline('''audio-classification''' , model=model)
        dataset = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''')
        audio = np.array(dataset[3]['''speech'''] , dtype=np.float32)
        output = audio_classifier(audio , top_k=4)
        self.assertEqual(
            nested_simplify(output , decimals=3) , [
{'''score''': 0.9_81, '''label''': '''go'''},
{'''score''': 0.0_07, '''label''': '''up'''},
{'''score''': 0.0_06, '''label''': '''_unknown_'''},
{'''score''': 0.0_01, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''')
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
pass
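# A minimal end-to-end sketch of the pipeline exercised above. The checkpoint
# is the same tiny test model used in the tests; running this downloads it,
# so it needs network access and torch installed.
def _audio_classification_demo():
    import numpy as np
    from transformers import pipeline
    classifier = pipeline('''audio-classification''' , model='''anton-l/wav2vec2-random-tiny-classifier''')
    waveform = np.zeros((16_000,), dtype=np.float32)  # 1 s of silence at 16 kHz
    return classifier(waveform , top_k=2)  # [{"score": ..., "label": ...}, ...]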
| 60 |
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
ZERO2 = '''zero2'''
ZERO3 = '''zero3'''
stages = [ZERO2, ZERO3]
def parameterized_custom_name_func ( func , param_num , param ):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name('''_'''.join(str(x ) for x in param.args ) )
    return f'{func.__name__}_{param_based_name}'
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2 ( TestCasePlus ):
    @parameterized.expand(params , name_func=parameterized_custom_name_func)
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=parameterized_custom_name_func)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
    @parameterized.expand(params , name_func=parameterized_custom_name_func)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
@require_torch_multi_gpu
    @parameterized.expand(params , name_func=parameterized_custom_name_func)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
self.run_and_check(
stage=__SCREAMING_SNAKE_CASE , model=__SCREAMING_SNAKE_CASE , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = models[model]
__a = self.run_trainer(
stage=__SCREAMING_SNAKE_CASE , model_name=__SCREAMING_SNAKE_CASE , eval_steps=__SCREAMING_SNAKE_CASE , num_train_epochs=1 , distributed=__SCREAMING_SNAKE_CASE , fpaa=__SCREAMING_SNAKE_CASE , )
self.do_checks(__SCREAMING_SNAKE_CASE)
return output_dir
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int = 10 , __SCREAMING_SNAKE_CASE : int = 1 , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
__a = self.get_auto_remove_tmp_dir('''./xxx''' , after=__SCREAMING_SNAKE_CASE)
__a = F'\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(__SCREAMING_SNAKE_CASE)}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n '.split()
if fpaa:
args.extend(['''--fp16'''])
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
__a = F'--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'.split()
__a = [F'{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py']
__a = self.get_launcher(__SCREAMING_SNAKE_CASE)
__a = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=self.get_env())
return output_dir
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[Any]=False):
'''simple docstring'''
__a = min(2 , get_gpu_count()) if distributed else 1
return F'deepspeed --num_nodes 1 --num_gpus {num_gpus}'.split()
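# Sketch of how the pieces above compose into a single subprocess invocation
# (paths below are illustrative; execute_subprocess_async receives the list):
def _example_launch_cmd():
    launcher = '''deepspeed --num_nodes 1 --num_gpus 2'''.split()
    script = ['''examples/research_projects/wav2vec2/run_asr.py''']
    args = ['''--output_dir''', '''/tmp/asr_test''', '''--fp16''']
    ds_args = '''--deepspeed ds_config_wav2vec2_zero2.json'''.split()
    return launcher + script + args + ds_args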
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
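# The machinery above defers the heavy torch imports until first attribute
# access. Conceptually it is PEP 562's module-level __getattr__; a stripped
# sketch of the same idea (the table below is hypothetical, not the real
# _LazyModule API):
#
#     import importlib
#
#     _LAZY = {'''ErnieModel''': '''.modeling_ernie'''}
#
#     def __getattr__(name):
#         if name in _LAZY:
#             module = importlib.import_module(_LAZY[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(name)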
| 60 |
def snake_to_camel_case ( input_str , use_pascal = False ):
    # Convert a snake_case string to camelCase, or PascalCase when use_pascal=True
    if not isinstance(input_str , str ):
        msg = f'Expected string as input, found {type(input_str )}'
        raise ValueError(msg )
    if not isinstance(use_pascal , bool ):
        msg = f'Expected boolean as use_pascal parameter, found {type(use_pascal )}'
        raise ValueError(msg )
    words = input_str.split('''_''' )
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '''''' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
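    # Quick usage checks for snake_to_camel_case:
    assert snake_to_camel_case('''some_random_string''') == '''someRandomString'''
    assert snake_to_camel_case('''some_random_string''', use_pascal=True) == '''SomeRandomString'''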
| 60 | 1 |
def solution ( n = 1000 ):
    # Sum of all multiples of 3 or 5 below n (Project Euler problem 1).
    # A number divisible by both (i.e. by 15) passes the first test and is
    # only counted once, so no correction term is needed.
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(f'{solution() = }')
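    # Quick checks: below 10 the multiples of 3 or 5 are 3, 5, 6 and 9, and the
    # classic answer for the default limit of 1000 is 233168.
    assert solution(10) == 23
    assert solution() == 233_168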
| 60 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None
    def __post_init__( self ):
        self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str)
    def __repr__( self ):
        return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
    @property
    def tuple( self ):
        return self.major, self.minor, self.patch
    def _validate_operand( self , other ):
        if isinstance(other , str):
            return Version(other)
        elif isinstance(other , Version):
            return other
        raise TypeError(F'{other} (type {type(other)}) cannot be compared to version.')
    def __eq__( self , other ):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__( self , other ):
        other = self._validate_operand(other)
        return self.tuple < other.tuple
    def __hash__( self ):
        return hash(_version_tuple_to_str(self.tuple))
    @classmethod
    def from_dict( cls , dic ):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})
    def _to_yaml_string( self ):
        return self.version_str
def _str_to_version_tuple ( version_str ):
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
    return tuple(int(v ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def _version_tuple_to_str ( version_tuple ):
    return ".".join(str(v ) for v in version_tuple )
| 60 | 1 |
from __future__ import annotations
def depth_first_search (
    possible_board ,
    diagonal_right_collisions ,
    diagonal_left_collisions ,
    boards ,
    n ,
):
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board )
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
        return
    # We iterate over each column in the row to find all possible results
    for col in range(n ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain this column value, because if it
        # does there is a vertical collision. Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # diagonal_right_collisions and diagonal_left_collisions respectively.
        #
        # If any of these checks is True there is a collision, so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue
        # If it is False we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution ( n ):
    boards = []
    depth_first_search([] , [] , [] , boards , n )
    # Print all the boards
    for board in boards:
        for column in board:
            print(column )
        print('''''' )
    print(len(boards ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
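    # Known solution counts as a quick check (1, 0, 0, 2, 10, 4 for n = 1..6)
    counts = []
    for size in range(1, 7):
        found = []
        depth_first_search([], [], [], found, size)
        counts.append(len(found))
    assert counts == [1, 0, 0, 2, 10, 4]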
| 60 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class SentencesToListOfCharacters ( tr.AbstractTransform ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : str = " "):
'''simple docstring'''
__a = sentence_delimiter
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
return list(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = []
for sent_idx, sentence in enumerate(__SCREAMING_SNAKE_CASE):
chars.extend(self.process_string(__SCREAMING_SNAKE_CASE))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__SCREAMING_SNAKE_CASE) - 1:
chars.append(self.sentence_delimiter)
return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class CER ( datasets.Metric ):
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , codebase_urls=['''https://github.com/jitsi/jiwer/'''] , reference_urls=[
'''https://en.wikipedia.org/wiki/Word_error_rate''',
'''https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates''',
] , )
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=False):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )["wer"]
__a = 0
__a = 0
for prediction, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = jiwer.compute_measures(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truth_transform=__SCREAMING_SNAKE_CASE , hypothesis_transform=__SCREAMING_SNAKE_CASE , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
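# A minimal sketch of the same computation done directly with jiwer, using the
# character-level transform defined above (one reference/prediction pair):
def _cer_demo():
    measures = jiwer.compute_measures(
        ['''this is the reference'''] , ['''this is the prediction'''] ,
        truth_transform=cer_transform , hypothesis_transform=cer_transform , )
    incorrect = measures['''substitutions'''] + measures['''deletions'''] + measures['''insertions''']
    total = measures['''substitutions'''] + measures['''deletions'''] + measures['''hits''']
    return incorrect / total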
| 60 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout ():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request('''GET''' , '''https://huggingface.co''' )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request('''GET''' , '''https://huggingface.co''' , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error ():
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request('''GET''' , '''https://huggingface.co''' )
def test_offline_with_datasets_offline_mode_enabled ():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head('''https://huggingface.co''' )
| 60 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
    _import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit'''] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit'''] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vit'''] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case :Dict = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
UpperCamelCase__ : List[str] = GPTSwaTokenizer
UpperCamelCase__ : Dict = False
UpperCamelCase__ : int = True
UpperCamelCase__ : List[Any] = False
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE , eos_token='''<unk>''' , bos_token='''<unk>''' , pad_token='''<unk>''')
tokenizer.save_pretrained(self.tmpdirname)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
__a = '''This is a test'''
__a = '''This is a test'''
return input_text, output_text
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = '''<s>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '''<unk>''')
self.assertEqual(vocab_keys[1] , '''<s>''')
self.assertEqual(vocab_keys[-1] , '''j''')
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 2_000)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 2_000)
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
__a = tokenizer.tokenize('''This is a test''')
self.assertListEqual(__SCREAMING_SNAKE_CASE , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE) , [465, 287, 265, 631, 842])
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
# fmt: off
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''] , )
# fmt: on
__a = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE)
self.assertListEqual(
__SCREAMING_SNAKE_CASE , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
__a = tokenizer.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE)
# fmt: off
self.assertListEqual(
__SCREAMING_SNAKE_CASE , ['''▁I''', '''▁was''', '''▁bor''', '''n''', '''▁in''', '''▁''', '''<0x39>''', '''2''', '''0''', '''0''', '''0''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁f''', '''al''', '''s''', '''<0xC3>''', '''<0xA9>''', '''.'''])
# fmt: on
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = GPTSwaTokenizer(__SCREAMING_SNAKE_CASE)
__a = ['''This is a test''', '''I was born in 92000, and this is falsé.''']
__a = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertListEqual(tokenizer.encode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
# Test that decode_fast returns the input text
for text, token_ids in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
self.assertEqual(tokenizer.decode_fast(__SCREAMING_SNAKE_CASE) , __SCREAMING_SNAKE_CASE)
@slow
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = [
'''<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')''',
'''Hey there, how are you doing this fine day?''',
'''This is a text with a trailing spaces followed by a dot .''',
'''Häj sväjs lillebrör! =)''',
'''Det är inget fel på Mr. Cool''',
]
# fmt: off
__a = {'''input_ids''': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__SCREAMING_SNAKE_CASE , model_name='''AI-Sweden/gpt-sw3-126m''' , sequences=__SCREAMING_SNAKE_CASE , )
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bigbird_pegasus'''] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 60 |
from __future__ import annotations
solution = []
def is_safe ( board , row , column ):
    # Check the row, the column and both diagonals for an attacking queen
    for i in range(len(board ) ):
        if board[row][i] == 1:
            return False
    for i in range(len(board ) ):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , -1 , -1 ) ):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row , -1 , -1 ) , range(column , len(board ) ) ):
        if board[i][j] == 1:
            return False
    return True
def solve ( board , row ):
    if row >= len(board ):
        solution.append(board )
        printboard(board )
        print()
        return True
    for i in range(len(board ) ):
        if is_safe(board , row , i ):
            board[row][i] = 1
            solve(board , row + 1 )
            board[row][i] = 0
    return False
def printboard ( board ):
    for i in range(len(board ) ):
        for j in range(len(board ) ):
            if board[i][j] == 1:
                print('''Q''' , end=''' ''' )
            else:
                print('''.''' , end=''' ''' )
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n )] for j in range(n )]
solve(board , 0)
print('''The total no. of solutions are :''' , len(solution ) )
| 60 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input ():
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser ( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' , description=description )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' , description=description )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser
def config_command ( args ):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(f'accelerate configuration saved at {config_file}' )
def main ():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
    main()
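# The same flow is normally reached through the accelerate entry point, e.g.:
#   accelerate config
#   accelerate config --config_file /tmp/accelerate_config.yaml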
| 60 |
def remove_duplicates ( key ):
    key_no_dups = ''''''
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups
def create_cipher_map ( key ):
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher ( message , cipher_map ):
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher ( message , cipher_map ):
    # Reverse the cipher mapping
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main ():
    message = input('''Enter message to encode or decode: ''' ).strip()
    key = input('''Enter keyword: ''' ).strip()
    option = input('''Encipher or decipher? E/D:''' ).strip()[0].lower()
    try:
        func = {'''e''': encipher, '''d''': decipher}[option]
    except KeyError:
        raise KeyError('''invalid input option''' )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
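    # Round-trip check: enciphering and then deciphering with the same keyword
    # map returns the upper-cased plaintext.
    demo_map = create_cipher_map('''Goodbye!!''')
    assert decipher(encipher('''Hello World!!''', demo_map), demo_map) == '''HELLO WORLD!!'''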
main()
| 60 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_resnet''': ['''RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ResNetConfig''', '''ResNetOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_resnet'''] = [
'''RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ResNetForImageClassification''',
'''ResNetModel''',
'''ResNetPreTrainedModel''',
'''ResNetBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_resnet'''] = [
'''TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFResNetForImageClassification''',
'''TFResNetModel''',
'''TFResNetPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_resnet'''] = [
'''FlaxResNetForImageClassification''',
'''FlaxResNetModel''',
'''FlaxResNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 60 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
SPIECE_UNDERLINE = '''▁'''
class BarthezTokenizerFast ( PreTrainedTokenizerFast ):
UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : str = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ : Dict = BarthezTokenizer
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int]=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Tuple="<s>" , __SCREAMING_SNAKE_CASE : List[str]="</s>" , __SCREAMING_SNAKE_CASE : Tuple="</s>" , __SCREAMING_SNAKE_CASE : int="<s>" , __SCREAMING_SNAKE_CASE : Any="<unk>" , __SCREAMING_SNAKE_CASE : int="<pad>" , __SCREAMING_SNAKE_CASE : Any="<mask>" , **__SCREAMING_SNAKE_CASE : Any , ):
'''simple docstring'''
__a = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else mask_token
super().__init__(
__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__a = vocab_file
__a = False if not self.vocab_file else True
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__a = [self.cls_token_id]
__a = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None):
'''simple docstring'''
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''')
if not os.path.isdir(__SCREAMING_SNAKE_CASE):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
__a = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
if os.path.abspath(self.vocab_file) != os.path.abspath(__SCREAMING_SNAKE_CASE):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE)
return (out_vocab_file,)
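# A minimal sketch of the layouts built by the two token-id methods above
# (token ids are illustrative; BARThez follows RoBERTa-style conventions):
#     single sequence: <s> A </s>            -> cls + ids_a + sep
#     sequence pair:   <s> A </s></s> B </s> -> cls + ids_a + sep + sep + ids_b + sep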
| 60 | 1 |
import random
def _partition ( data , pivot ):
    # Three-way partition of data around pivot
    less , equal , greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element )
        elif element > pivot:
            greater.append(element )
        else:
            equal.append(element )
    return less, equal, greater
def quick_select ( items , index ):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items ) or index < 0:
        return None
    pivot = items[random.randint(0 , len(items ) - 1 )]
    count = 0
    smaller , equal , larger = _partition(items , pivot )
    count = len(equal )
    m = len(smaller )
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller , index )
    # must be in larger
    else:
        return quick_select(larger , index - (m + count) )
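# Usage sketch: quick_select returns the element that would land at position
# `index` in sorted order, in expected linear time.
if __name__ == "__main__":
    assert quick_select([5, 1, 4, 2, 3], 0) == 1
    assert quick_select([5, 1, 4, 2, 3], 2) == 3   # median of the five items
    assert quick_select([5, 1, 4, 2, 3], 9) is None  # out-of-range index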
| 60 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('''_Datasets''', ['''train''', '''validation''', '''test'''])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = '''https://storage.googleapis.com/cvdf-datasets/mnist/'''
def __snake_case ( _UpperCAmelCase ):
__a = numpy.dtype(numpy.uintaa ).newbyteorder('''>''' )
return numpy.frombuffer(bytestream.read(4 ) , dtype=_UpperCAmelCase )[0]
@deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __snake_case ( _UpperCAmelCase ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
__a = _readaa(_UpperCAmelCase )
if magic != 2051:
raise ValueError(
'''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name) )
__a = _readaa(_UpperCAmelCase )
__a = _readaa(_UpperCAmelCase )
__a = _readaa(_UpperCAmelCase )
__a = bytestream.read(rows * cols * num_images )
__a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta )
__a = data.reshape(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 1 )
return data
@deprecated(_UpperCAmelCase , '''Please use tf.one_hot on tensors.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = labels_dense.shape[0]
__a = numpy.arange(_UpperCAmelCase ) * num_classes
__a = numpy.zeros((num_labels, num_classes) )
__a = 1
return labels_one_hot
@deprecated(_UpperCAmelCase , '''Please use tf.data to implement this functionality.''' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=False , _UpperCAmelCase=10 ):
print('''Extracting''' , f.name )
with gzip.GzipFile(fileobj=_UpperCAmelCase ) as bytestream:
__a = _readaa(_UpperCAmelCase )
if magic != 2049:
raise ValueError(
'''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name) )
__a = _readaa(_UpperCAmelCase )
__a = bytestream.read(_UpperCAmelCase )
__a = numpy.frombuffer(_UpperCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(_UpperCAmelCase , _UpperCAmelCase )
return labels
class _A :
@deprecated(
__SCREAMING_SNAKE_CASE , '''Please use alternatives such as official/mnist/_DataSet.py'''
''' from tensorflow/models.''' , )
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Any=dtypes.floataa , __SCREAMING_SNAKE_CASE : List[str]=True , __SCREAMING_SNAKE_CASE : Any=None , ):
'''simple docstring'''
__a , __a = random_seed.get_seed(__SCREAMING_SNAKE_CASE)
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda)
__a = dtypes.as_dtype(__SCREAMING_SNAKE_CASE).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype)
if fake_data:
__a = 10_000
__a = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'images.shape: {images.shape} labels.shape: {labels.shape}'
__a = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__a = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2])
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__a = images.astype(numpy.floataa)
__a = numpy.multiply(__SCREAMING_SNAKE_CASE , 1.0 / 2_55.0)
__a = images
__a = labels
__a = 0
__a = 0
@property
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
return self._images
@property
def _lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
return self._labels
@property
def _lowerCamelCase ( self : List[str]):
'''simple docstring'''
return self._num_examples
@property
def _lowerCamelCase ( self : str):
'''simple docstring'''
return self._epochs_completed
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=True):
'''simple docstring'''
if fake_data:
__a = [1] * 784
__a = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(__SCREAMING_SNAKE_CASE)],
[fake_label for _ in range(__SCREAMING_SNAKE_CASE)],
)
__a = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__a = numpy.arange(self._num_examples)
numpy.random.shuffle(__SCREAMING_SNAKE_CASE)
__a = self.images[perma]
__a = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__a = self._num_examples - start
__a = self._images[start : self._num_examples]
__a = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__a = numpy.arange(self._num_examples)
numpy.random.shuffle(__SCREAMING_SNAKE_CASE)
__a = self.images[perm]
__a = self.labels[perm]
# Start next epoch
__a = 0
__a = batch_size - rest_num_examples
__a = self._index_in_epoch
__a = self._images[start:end]
__a = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0),
)
else:
self._index_in_epoch += batch_size
__a = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(None , '''Please write your own downloading logic.''' )
def _maybe_download ( filename , work_directory , source_url ):
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
    with gfile.GFile(filepath ) as f:
        size = f.size()
    print('''Successfully downloaded''' , filename , size , '''bytes.''' )
    return filepath
@deprecated(
    None , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def read_data_sets ( train_dir , fake_data=False , one_hot=False , dtype=dtypes.floataa , reshape=True , validation_size=5000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=_UpperCAmelCase , one_hot=_UpperCAmelCase , dtype=_UpperCAmelCase , seed=_UpperCAmelCase )
__a = fake()
__a = fake()
__a = fake()
return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
if not source_url: # empty string check
__a = DEFAULT_SOURCE_URL
__a = '''train-images-idx3-ubyte.gz'''
__a = '''train-labels-idx1-ubyte.gz'''
__a = '''t10k-images-idx3-ubyte.gz'''
__a = '''t10k-labels-idx1-ubyte.gz'''
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + train_images_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_images(_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + train_labels_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + test_images_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_images(_UpperCAmelCase )
__a = _maybe_download(
_UpperCAmelCase , _UpperCAmelCase , source_url + test_labels_file )
with gfile.Open(_UpperCAmelCase , '''rb''' ) as f:
__a = _extract_labels(_UpperCAmelCase , one_hot=_UpperCAmelCase )
if not 0 <= validation_size <= len(_UpperCAmelCase ):
__a = (
'''Validation size should be between 0 and '''
f'{len(_UpperCAmelCase )}. Received: {validation_size}.'
)
raise ValueError(_UpperCAmelCase )
__a = train_images[:validation_size]
__a = train_labels[:validation_size]
__a = train_images[validation_size:]
__a = train_labels[validation_size:]
__a = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
__a = _DataSet(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
return _Datasets(train=_UpperCAmelCase , validation=_UpperCAmelCase , test=_UpperCAmelCase )
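# Usage sketch for the deprecated read_data_sets loader above (the directory is
# illustrative; the four archives are fetched from the CVDF mirror on first call):
#     mnist = read_data_sets('''/tmp/mnist_data''', one_hot=True)
#     images, labels = mnist.train.next_batch(32)  # shapes (32, 784) and (32, 10)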
| 60 | 1 |
def factorial ( digit ):
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def krishnamurthy ( number ):
    # A Krishnamurthy number equals the sum of the factorials of its digits
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate , digit = divmod(duplicate , 10 )
        fact_sum += factorial(digit )
    return fact_sum == number
if __name__ == "__main__":
print('''Program to check whether a number is a Krisnamurthy Number or not.''')
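    # Quick self-check: 1, 2, 145 and 40585 are the only base-10 examples.
    assert krishnamurthy(145) and krishnamurthy(40_585)
    assert not krishnamurthy(146)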
    number = int(input('''Enter number: ''').strip())
print(
f'{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number.'
)
| 60 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ):
'''simple docstring'''
__a = size if size is not None else {'''height''': 20, '''width''': 20}
__a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_center_crop
__a = crop_size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_reduce_labels
def _lowerCamelCase ( self : str):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __snake_case ( ):
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(dataset[0]['''file'''] )
__a = Image.open(dataset[1]['''file'''] )
return image, map
def __snake_case ( ):
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(ds[0]['''file'''] )
__a = Image.open(ds[1]['''file'''] )
__a = Image.open(ds[2]['''file'''] )
__a = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = BeitImageProcessingTester(self)
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std'''))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20})
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
__a = []
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
__a = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test not batched input (PIL images)
__a , __a = prepare_semantic_single_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched input (PIL images)
__a , __a = prepare_semantic_batch_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__a , __a = prepare_semantic_single_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 150)
__a = True
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
| 60 | 1 |
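# Editor's note (illustrative sketch, not a dataset row): what `do_reduce_labels` in the
# BEiT image-processor test above is understood to do for ADE20k-style maps -- remap
# background 0 to the ignore index 255 and shift the remaining class ids down by one.
# This numpy version is an assumption based on the assertions in the test, not the
# library's actual implementation.
import numpy as np

def reduce_labels(segmentation_map):
    label = np.asarray(segmentation_map, dtype=np.int64)
    label[label == 0] = 255    # background -> ignore index
    label = label - 1          # shift remaining class ids down
    label[label == 254] = 255  # keep the ignore index stable
    return label

assert reduce_labels([0, 1, 150]).tolist() == [255, 0, 149]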
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def __snake_case ( _UpperCAmelCase ):
return getitem, k
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
return setitem, k, v
def __snake_case ( _UpperCAmelCase ):
return delitem, k
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase ):
try:
return fun(_UpperCAmelCase , *_UpperCAmelCase ), None
except Exception as e:
return None, e
__snake_case :Optional[int] = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
__snake_case :List[Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
__snake_case :Union[str, Any] = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
__snake_case :Dict = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
__snake_case :Tuple = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__snake_case :Tuple = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def __snake_case ( _UpperCAmelCase ):
__a = HashMap(initial_block_size=4 )
__a = {}
for _, (fun, *args) in enumerate(_UpperCAmelCase ):
__a , __a = _run_operation(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase )
__a , __a = _run_operation(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase )
assert my_res == py_res
assert str(_UpperCAmelCase ) == str(_UpperCAmelCase )
assert set(_UpperCAmelCase ) == set(_UpperCAmelCase )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase )
assert set(my.items() ) == set(py.items() )
def __snake_case ( ):
def is_public(_UpperCAmelCase ) -> bool:
return not name.startswith('''_''' )
__a = {name for name in dir({} ) if is_public(_UpperCAmelCase )}
__a = {name for name in dir(HashMap() ) if is_public(_UpperCAmelCase )}
assert dict_public_names > hash_public_names
| 60 |
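# Editor's note (illustrative sketch, not a dataset row): the differential-testing idea
# behind the HashMap test above -- replay the same (operation, *args) tuples against the
# candidate map and a plain dict, and require identical results and failure types.
from operator import delitem, getitem, setitem

def run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as exc:  # compare failure modes, not just successes
        return None, type(exc)

ops = [(setitem, "k", 1), (getitem, "k"), (delitem, "k"), (getitem, "k")]
reference, candidate = {}, {}
for fun, *args in ops:
    assert run_operation(candidate, fun, *args) == run_operation(reference, fun, *args)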
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : int):
'''simple docstring'''
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = self._create_example_records()
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''])
for i, r in enumerate(__SCREAMING_SNAKE_CASE):
self.assertDictEqual(__SCREAMING_SNAKE_CASE , example_records[i])
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self._create_example_records()
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
__a = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
self.assertEqual(dset.info , dset_from_dict.info)
def _lowerCamelCase ( self : int): # checks what happens with missing columns
'''simple docstring'''
__a = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
self.assertDictEqual(dset[0] , {'''col_1''': 1})
self.assertDictEqual(dset[1] , {'''col_1''': None}) # NB: first record is used for columns
def _lowerCamelCase ( self : Optional[Any]): # checks if the type can be inferred from the second record
'''simple docstring'''
__a = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
__a = Dataset.from_list(__SCREAMING_SNAKE_CASE)
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''')))
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = Dataset.from_list([])
self.assertEqual(len(__SCREAMING_SNAKE_CASE) , 0)
self.assertListEqual(dset.column_names , [])
| 60 | 1 |
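# Editor's note (illustrative sketch, not a dataset row): the behaviour the Dataset test
# above pins down, assuming the Hugging Face `datasets` library -- `Dataset.from_list`
# infers its columns from the first record, so keys that only appear later are dropped
# and missing values come back as None.
from datasets import Dataset

dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
assert dset[0] == {"col_1": 1}
assert dset[1] == {"col_1": None}  # "col_2" was not in the first record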
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__snake_case :str = logging.get_logger(__name__)
__snake_case :str = ['''model.decoder.embed_positions.weights''']
def __snake_case ( _UpperCAmelCase ):
if "emb" in name:
__a = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
__a = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
__a = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
__a = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
__a = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
__a = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
__a = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
__a = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
__a = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
__a = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
__a = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = list(state_dict.keys() )
__a = {}
for key in keys:
__a = state_dict.pop(_UpperCAmelCase )
__a = rename_keys(_UpperCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
__a = val[:hidden_size, :]
__a = val[hidden_size : 2 * hidden_size, :]
__a = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__a = val
else:
__a = val
return state_dict, enc_dec_proj_state_dict
def __snake_case ( _UpperCAmelCase ):
if checkpoint == "small":
# default config values
__a = 1024
__a = 24
__a = 16
elif checkpoint == "medium":
__a = 1536
__a = 48
__a = 24
elif checkpoint == "large":
__a = 2048
__a = 48
__a = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
__a = MusicgenDecoderConfig(
hidden_size=_UpperCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , )
return config
@torch.no_grad()
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="cpu" ):
__a = MusicGen.get_pretrained(_UpperCAmelCase , device=_UpperCAmelCase )
__a = decoder_config_from_checkpoint(_UpperCAmelCase )
__a = fairseq_model.lm.state_dict()
__a , __a = rename_state_dict(
_UpperCAmelCase , hidden_size=decoder_config.hidden_size )
__a = T5EncoderModel.from_pretrained('''t5-base''' )
__a = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
__a = MusicgenForCausalLM(_UpperCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__a , __a = decoder.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_UpperCAmelCase )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(_UpperCAmelCase ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
__a = MusicgenForConditionalGeneration(text_encoder=_UpperCAmelCase , audio_encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_UpperCAmelCase )
# check we can do a forward pass
__a = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__a = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__a = model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
__a = AutoTokenizer.from_pretrained('''t5-base''' )
__a = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
__a = MusicgenProcessor(feature_extractor=_UpperCAmelCase , tokenizer=_UpperCAmelCase )
# set the appropriate bos/pad token ids
__a = 2048
__a = 2048
# set other default generation config params
__a = int(30 * audio_encoder.config.frame_rate )
__a = True
__a = 3.0
if pytorch_dump_folder is not None:
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(_UpperCAmelCase )
processor.save_pretrained(_UpperCAmelCase )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(_UpperCAmelCase )
processor.push_to_hub(_UpperCAmelCase )
if __name__ == "__main__":
__snake_case :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
__snake_case :Optional[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 60 |
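# Editor's note (illustrative sketch, not a dataset row): the fused-QKV split performed
# in the MusicGen converter above -- fairseq stores the query/key/value projections
# stacked in one `in_proj_weight` of shape (3 * hidden_size, hidden_size), which is
# sliced row-wise into three separate matrices.
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
q_proj = in_proj_weight[:hidden_size, :]
k_proj = in_proj_weight[hidden_size : 2 * hidden_size, :]
v_proj = in_proj_weight[-hidden_size:, :]
assert q_proj.shape == k_proj.shape == v_proj.shape == (hidden_size, hidden_size)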
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def __snake_case ( _UpperCAmelCase ):
__a = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def __snake_case ( _UpperCAmelCase ):
__a = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', '''stage2.cls_token''') )
return token
def __snake_case ( ):
__a = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = '''imagenet-1k-id2label.json'''
__a = 1000
__a = '''huggingface/label-files'''
__a = num_labels
__a = json.load(open(cached_download(hf_hub_url(_UpperCAmelCase , _UpperCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) )
__a = {int(_UpperCAmelCase ): v for k, v in idalabel.items()}
__a = idalabel
__a = {v: k for k, v in idalabel.items()}
__a = __a = CvtConfig(num_labels=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase )
# For depth size 13 (13 = 1+2+10)
if cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "13":
__a = [1, 2, 10]
# For depth size 21 (21 = 1+4+16)
elif cvt_model.rsplit('''/''' , 1 )[-1][4:6] == "21":
__a = [1, 4, 16]
# For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
else:
__a = [2, 2, 20]
__a = [3, 12, 16]
__a = [192, 768, 1024]
__a = CvtForImageClassification(_UpperCAmelCase )
__a = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' )
__a = image_size
__a = torch.load(_UpperCAmelCase , map_location=torch.device('''cpu''' ) )
__a = OrderedDict()
__a = []
for idx in range(len(config.depth ) ):
if config.cls_token[idx]:
__a = list_of_state_dict + cls_token(_UpperCAmelCase )
__a = list_of_state_dict + embeddings(_UpperCAmelCase )
for cnt in range(config.depth[idx] ):
__a = list_of_state_dict + attention(_UpperCAmelCase , _UpperCAmelCase )
__a = list_of_state_dict + final()
for gg in list_of_state_dict:
print(_UpperCAmelCase )
for i in range(len(_UpperCAmelCase ) ):
__a = original_weights[list_of_state_dict[i][1]]
model.load_state_dict(_UpperCAmelCase )
model.save_pretrained(_UpperCAmelCase )
image_processor.save_pretrained(_UpperCAmelCase )
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
__snake_case :str = argparse.ArgumentParser()
parser.add_argument(
'''--cvt_model''',
default='''cvt-w24''',
type=str,
help='''Name of the cvt model you\'d like to convert.''',
)
parser.add_argument(
'''--image_size''',
default=384,
type=int,
help='''Input Image Size''',
)
parser.add_argument(
'''--cvt_file_name''',
default=r'''cvtmodels\CvT-w24-384x384-IN-22k.pth''',
type=str,
help='''Path to the original CvT checkpoint file.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
__snake_case :Dict = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 60 | 1 |
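# Editor's note (illustrative sketch, not a dataset row): the rename-table pattern used
# by the CvT converter above -- collect (new_name, old_name) pairs, then copy each
# tensor from the original checkpoint into an OrderedDict keyed by the new names.
# The key strings below are trimmed examples, not the full mapping.
from collections import OrderedDict

original_weights = {"stage0.patch_embed.proj.weight": [1.0, 2.0]}
rename_pairs = [
    (
        "cvt.encoder.stages.0.embedding.convolution_embeddings.projection.weight",
        "stage0.patch_embed.proj.weight",
    ),
]
new_state_dict = OrderedDict(
    (new_key, original_weights[old_key]) for new_key, old_key in rename_pairs
)
assert len(new_state_dict) == 1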
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Optional[int] = '''char'''
UpperCamelCase__ : List[Any] = '''bpe'''
UpperCamelCase__ : Dict = '''wp'''
__snake_case :Any = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _A ( __UpperCAmelCase ):
UpperCamelCase__ : Any = ['''image_processor''', '''char_tokenizer''']
UpperCamelCase__ : int = '''ViTImageProcessor'''
UpperCamelCase__ : Union[str, Any] = '''MgpstrTokenizer'''
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , **__SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __SCREAMING_SNAKE_CASE , )
__a = kwargs.pop('''feature_extractor''')
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''')
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''')
__a = tokenizer
__a = AutoTokenizer.from_pretrained('''gpt2''')
__a = AutoTokenizer.from_pretrained('''bert-base-uncased''')
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def __call__( self : Tuple , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : int=None , **__SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''')
if images is not None:
__a = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
if text is not None:
__a = self.char_tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
if text is None:
return inputs
elif images is None:
return encodings
else:
__a = encodings['''input_ids''']
return inputs
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int]):
'''simple docstring'''
__a , __a , __a = sequences
__a = char_preds.size(0)
__a , __a = self._decode_helper(__SCREAMING_SNAKE_CASE , '''char''')
__a , __a = self._decode_helper(__SCREAMING_SNAKE_CASE , '''bpe''')
__a , __a = self._decode_helper(__SCREAMING_SNAKE_CASE , '''wp''')
__a = []
__a = []
for i in range(__SCREAMING_SNAKE_CASE):
__a = [char_scores[i], bpe_scores[i], wp_scores[i]]
__a = [char_strs[i], bpe_strs[i], wp_strs[i]]
__a = scores.index(max(__SCREAMING_SNAKE_CASE))
final_strs.append(strs[max_score_index])
final_scores.append(scores[max_score_index])
__a = {}
__a = final_strs
__a = final_scores
__a = char_strs
__a = bpe_strs
__a = wp_strs
return out
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]):
'''simple docstring'''
if format == DecodeType.CHARACTER:
__a = self.char_decode
__a = 1
__a = '''[s]'''
elif format == DecodeType.BPE:
__a = self.bpe_decode
__a = 2
__a = '''#'''
elif format == DecodeType.WORDPIECE:
__a = self.wp_decode
__a = 102
__a = '''[SEP]'''
else:
raise ValueError(F'Format {format} is not supported.')
__a , __a = [], []
__a = pred_logits.size(0)
__a = pred_logits.size(1)
__a , __a = pred_logits.topk(1 , dim=-1 , largest=__SCREAMING_SNAKE_CASE , sorted=__SCREAMING_SNAKE_CASE)
__a = preds_index.view(-1 , __SCREAMING_SNAKE_CASE)[:, 1:]
__a = decoder(__SCREAMING_SNAKE_CASE)
__a , __a = torch.nn.functional.softmax(__SCREAMING_SNAKE_CASE , dim=2).max(dim=2)
__a = preds_max_prob[:, 1:]
for index in range(__SCREAMING_SNAKE_CASE):
__a = preds_str[index].find(__SCREAMING_SNAKE_CASE)
__a = preds_str[index][:pred_eos]
__a = preds_index[index].cpu().tolist()
__a = pred_index.index(__SCREAMING_SNAKE_CASE) if eos_token in pred_index else -1
__a = preds_max_prob[index][: pred_eos_index + 1]
__a = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(__SCREAMING_SNAKE_CASE)
conf_scores.append(__SCREAMING_SNAKE_CASE)
return dec_strs, conf_scores
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str]):
'''simple docstring'''
__a = [seq.replace(''' ''' , '''''') for seq in self.char_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)]
return decode_strs
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
return self.bpe_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = [seq.replace(''' ''' , '''''') for seq in self.wp_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE)]
return decode_strs
| 60 |
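# Editor's note (illustrative sketch, not a dataset row): the best-of-three selection in
# the MGP-STR processor above -- each image is decoded three ways (character, BPE and
# wordpiece) and the candidate string with the highest confidence score wins.
candidates = [("hello", 0.91), ("helo", 0.85), ("hell0", 0.60)]  # (string, score)
scores = [score for _, score in candidates]
best_str, best_score = candidates[scores.index(max(scores))]
assert (best_str, best_score) == ("hello", 0.91)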
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def __snake_case ( _UpperCAmelCase ):
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__a = np.array(_UpperCAmelCase ).astype(np.float32 ) / 255.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(_UpperCAmelCase )
return 2.0 * image - 1.0
class _A ( __UpperCAmelCase ):
def __init__( self : Any , __SCREAMING_SNAKE_CASE : VQModel , __SCREAMING_SNAKE_CASE : UNet2DModel , __SCREAMING_SNAKE_CASE : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=__SCREAMING_SNAKE_CASE , unet=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE)
@torch.no_grad()
def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[torch.Tensor, PIL.Image.Image] = None , __SCREAMING_SNAKE_CASE : Optional[int] = 1 , __SCREAMING_SNAKE_CASE : Optional[int] = 100 , __SCREAMING_SNAKE_CASE : Optional[float] = 0.0 , __SCREAMING_SNAKE_CASE : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __SCREAMING_SNAKE_CASE : Optional[str] = "pil" , __SCREAMING_SNAKE_CASE : bool = True , ):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = 1
elif isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
__a = image.shape[0]
else:
raise ValueError(F'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(__SCREAMING_SNAKE_CASE)}')
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = preprocess(__SCREAMING_SNAKE_CASE)
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters()).dtype
__a = randn_tensor(__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , device=self.device , dtype=__SCREAMING_SNAKE_CASE)
__a = image.to(device=self.device , dtype=__SCREAMING_SNAKE_CASE)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(__SCREAMING_SNAKE_CASE , device=self.device)
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler; it will be ignored by other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(__SCREAMING_SNAKE_CASE):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1)
__a = self.scheduler.scale_model_input(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
# predict the noise residual
__a = self.unet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(__SCREAMING_SNAKE_CASE).sample
__a = torch.clamp(__SCREAMING_SNAKE_CASE , -1.0 , 1.0)
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(__SCREAMING_SNAKE_CASE)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__SCREAMING_SNAKE_CASE)
| 60 | 1 |
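# Editor's note (illustrative sketch, not a dataset row): the signature-introspection
# trick used by the pipeline above -- pass `eta` to scheduler.step() only when the
# scheduler accepts it (DDIM does, most others do not). `DummyScheduler` is made up
# here purely to demonstrate the pattern.
import inspect

class DummyScheduler:
    def step(self, model_output, timestep, sample):  # no eta parameter
        return sample

scheduler = DummyScheduler()
accepts_eta = "eta" in inspect.signature(scheduler.step).parameters
extra_kwargs = {"eta": 0.0} if accepts_eta else {}
assert scheduler.step(None, 0, "sample", **extra_kwargs) == "sample"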
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
__snake_case :Union[str, Any] = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def __snake_case ( _UpperCAmelCase = "mumbai" ):
__a = BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
__a = job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
__a = job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(f'Job {i:>2} is {job[0]} at {job[1]}')
| 60 |
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
__snake_case :Any = TypeVar('''KT''')
__snake_case :List[str] = TypeVar('''VT''')
class _A ( Generic[KT, VT] ):
def __init__( self : Dict , __SCREAMING_SNAKE_CASE : KT | str = "root" , __SCREAMING_SNAKE_CASE : VT | None = None):
'''simple docstring'''
__a = key
__a = value
__a = []
def __repr__( self : Dict):
'''simple docstring'''
return F'Node({self.key}: {self.value})'
@property
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
return len(self.forward)
class _A ( Generic[KT, VT] ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : float = 0.5 , __SCREAMING_SNAKE_CASE : int = 16):
'''simple docstring'''
__a = Node[KT, VT]()
__a = 0
__a = p
__a = max_level
def __str__( self : Union[str, Any]):
'''simple docstring'''
__a = list(self)
if len(__SCREAMING_SNAKE_CASE) == 0:
return F'SkipList(level={self.level})'
__a = max((len(str(__SCREAMING_SNAKE_CASE)) for item in items) , default=4)
__a = max(__SCREAMING_SNAKE_CASE , 4) + 4
__a = self.head
__a = []
__a = node.forward.copy()
lines.append(F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''') + '''* ''' * len(__SCREAMING_SNAKE_CASE))
lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE))
while len(node.forward) != 0:
__a = node.forward[0]
lines.append(
F'[{node.key}]'.ljust(__SCREAMING_SNAKE_CASE , '''-''')
+ ''' '''.join(str(n.key) if n.key == node.key else '''|''' for n in forwards))
lines.append(''' ''' * label_size + '''| ''' * len(__SCREAMING_SNAKE_CASE))
__a = node.forward
lines.append('''None'''.ljust(__SCREAMING_SNAKE_CASE) + '''* ''' * len(__SCREAMING_SNAKE_CASE))
return F'SkipList(level={self.level})\n' + "\n".join(__SCREAMING_SNAKE_CASE)
def __iter__( self : int):
'''simple docstring'''
__a = self.head
while len(node.forward) != 0:
yield node.forward[0].key
__a = node.forward[0]
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]):
'''simple docstring'''
__a = []
__a = self.head
for i in reversed(range(self.level)):
# i < node.level - When node level is lesser than `i` decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__a = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(__SCREAMING_SNAKE_CASE)
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : KT):
'''simple docstring'''
__a , __a = self._locate_node(__SCREAMING_SNAKE_CASE)
if node is not None:
for i, update_node in enumerate(__SCREAMING_SNAKE_CASE):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__a = node.forward[i]
else:
__a = update_node.forward[:i]
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : KT , __SCREAMING_SNAKE_CASE : VT):
'''simple docstring'''
__a , __a = self._locate_node(__SCREAMING_SNAKE_CASE)
if node is not None:
__a = value
else:
__a = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , __SCREAMING_SNAKE_CASE):
update_vector.append(self.head)
__a = level
__a = Node(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
for i, update_node in enumerate(update_vector[:level]):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i])
if update_node.level < i + 1:
update_node.forward.append(__SCREAMING_SNAKE_CASE)
else:
__a = new_node
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : VT):
'''simple docstring'''
__a , __a = self._locate_node(__SCREAMING_SNAKE_CASE)
if node is not None:
return node.value
return None
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
__a = skip_list.head
__a = {}
while node.level != 0:
__a = node.forward[0]
__a = node.value
assert len(_UpperCAmelCase ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
__a = skip_list.head
__a = {}
while node.level != 0:
__a = node.forward[0]
__a = node.value
if len(_UpperCAmelCase ) != 4:
print()
assert len(_UpperCAmelCase ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def __snake_case ( ):
__a = SkipList()
assert skip_list.find('''Some key''' ) is None
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def __snake_case ( ):
__a = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def __snake_case ( ):
__a = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
def traverse_keys(_UpperCAmelCase ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(_UpperCAmelCase )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def __snake_case ( ):
def is_sorted(_UpperCAmelCase ):
return all(next_item >= item for item, next_item in zip(_UpperCAmelCase , lst[1:] ) )
__a = SkipList()
for i in range(10 ):
skip_list.insert(_UpperCAmelCase , _UpperCAmelCase )
assert is_sorted(list(_UpperCAmelCase ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(_UpperCAmelCase ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(_UpperCAmelCase ) )
def __snake_case ( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def __snake_case ( ):
__a = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(_UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 60 | 1 |
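# Editor's note (illustrative sketch, not a dataset row): the level-assignment rule the
# skip list above relies on -- a node's height follows a geometric distribution, so a
# node reaches level k with probability p**(k - 1), capped at max_level. That is what
# gives the expected O(log n) search path.
from random import random

def random_level(p: float = 0.5, max_level: int = 16) -> int:
    level = 1
    while random() < p and level < max_level:
        level += 1
    return level

levels = [random_level() for _ in range(10_000)]
assert 1 <= min(levels) and max(levels) <= 16  # ~half stay at level 1, ~quarter reach 2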
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _A ( unittest.TestCase ):
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int=7 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : List[Any]=18 , __SCREAMING_SNAKE_CASE : Optional[Any]=30 , __SCREAMING_SNAKE_CASE : int=400 , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : Any=[0.5, 0.5, 0.5] , __SCREAMING_SNAKE_CASE : List[str]=False , ):
'''simple docstring'''
__a = size if size is not None else {'''height''': 20, '''width''': 20}
__a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
__a = parent
__a = batch_size
__a = num_channels
__a = image_size
__a = min_resolution
__a = max_resolution
__a = do_resize
__a = size
__a = do_center_crop
__a = crop_size
__a = do_normalize
__a = image_mean
__a = image_std
__a = do_reduce_labels
def _lowerCamelCase ( self : str):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __snake_case ( ):
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(dataset[0]['''file'''] )
__a = Image.open(dataset[1]['''file'''] )
return image, map
def __snake_case ( ):
__a = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
__a = Image.open(ds[0]['''file'''] )
__a = Image.open(ds[1]['''file'''] )
__a = Image.open(ds[2]['''file'''] )
__a = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _A ( __UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ : Union[str, Any] = BeitImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = BeitImageProcessingTester(self)
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_resize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''size'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''center_crop'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''do_normalize'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_mean'''))
self.assertTrue(hasattr(__SCREAMING_SNAKE_CASE , '''image_std'''))
def _lowerCamelCase ( self : str):
'''simple docstring'''
__a = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20})
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
__a = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__SCREAMING_SNAKE_CASE)
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42})
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84})
self.assertEqual(image_processor.do_reduce_labels , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , Image.Image)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , numpify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , np.ndarray)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
# Test not batched input
__a = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__SCREAMING_SNAKE_CASE , torchify=__SCREAMING_SNAKE_CASE)
__a = []
for image in image_inputs:
self.assertIsInstance(__SCREAMING_SNAKE_CASE , torch.Tensor)
maps.append(torch.zeros(image.shape[-2:]).long())
# Test not batched input
__a = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test not batched input (PIL images)
__a , __a = prepare_semantic_single_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
# Test batched input (PIL images)
__a , __a = prepare_semantic_batch_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long)
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
def _lowerCamelCase ( self : Dict):
'''simple docstring'''
__a = self.image_processing_class(**self.image_processor_dict)
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
__a , __a = prepare_semantic_single_inputs()
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 150)
__a = True
__a = image_processing(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_tensors='''pt''')
self.assertTrue(encoding['''labels'''].min().item() >= 0)
self.assertTrue(encoding['''labels'''].max().item() <= 255)
| 60 |
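The image-processor tests above assert that with `do_reduce_labels` enabled, ADE20k segmentation maps whose raw values lie in [0, 150] come back with values in [0, 255]. A minimal numpy sketch of the usual reduce-labels transform (an illustration assuming the ADE20k convention that background is 0 and the ignore index is 255; not the processor's actual code):

import numpy as np

def reduce_labels(segmentation_map: np.ndarray, ignore_index: int = 255) -> np.ndarray:
    """Shift class ids down by one so that background (0) becomes the ignore index.

    ADE20k stores background as 0 and classes as 1..150; after the shift the
    classes are 0..149 and background is `ignore_index` (255).
    """
    seg = segmentation_map.astype(np.int64)
    seg = seg - 1                      # class k becomes k - 1, background 0 becomes -1
    seg[seg == -1] = ignore_index      # map background to the ignore index
    return seg

# Example: a 2x3 map with background (0) and classes 1 and 150
seg_map = np.array([[0, 1, 150], [1, 0, 150]])
reduced = reduce_labels(seg_map)
assert reduced.min() >= 0 and reduced.max() <= 255  # the bounds the tests check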
__snake_case :str = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
    # Return True if the sink t is reachable from the source s (BFS over the residual graph).
__a = [False] * len(_UpperCAmelCase )
__a = [s]
__a = True
while queue:
__a = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_UpperCAmelCase )
__a = True
__a = u
return visited[t]
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = [-1] * (len(_UpperCAmelCase ))
__a = 0
__a = []
__a = [i[:] for i in graph] # Record original cut, copy.
while bfs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = float('''Inf''' )
__a = sink
while s != source:
# Find the minimum value in select path
__a = min(_UpperCAmelCase , graph[parent[s]][s] )
__a = parent[s]
max_flow += path_flow
__a = sink
while v != source:
__a = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__a = parent[v]
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 60 | 1 |
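The style-context snippet above is a BFS-based Ford-Fulkerson (Edmonds-Karp) min-cut over `test_graph`. A readable sketch of the same computation with descriptive names (the names are mine, not the dataset's); for this classic CLRS network the maximum flow is 23:

from collections import deque

def max_flow(capacity, source, sink):
    """Edmonds-Karp: repeatedly augment along a shortest path found by BFS."""
    n = len(capacity)
    residual = [row[:] for row in capacity]  # residual capacities, original kept intact
    flow = 0
    while True:
        # BFS for an augmenting path in the residual graph
        parent = [-1] * n
        parent[source] = source
        queue = deque([source])
        while queue and parent[sink] == -1:
            u = queue.popleft()
            for v in range(n):
                if parent[v] == -1 and residual[u][v] > 0:
                    parent[v] = u
                    queue.append(v)
        if parent[sink] == -1:          # no augmenting path left
            return flow
        # Find the bottleneck along the path, then push that much flow
        bottleneck = float("inf")
        v = sink
        while v != source:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while v != source:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck

test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
print(max_flow(test_graph, 0, 5))  # 23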
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__snake_case :int = logging.get_logger(__name__)
@add_end_docstrings(
__UpperCAmelCase ,R'''
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
''' ,)
class _A ( __UpperCAmelCase ):
def _lowerCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : GenericTensor):
'''simple docstring'''
if self.framework == "tf":
__a = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
elif self.framework == "pt":
__a = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE)
else:
raise ValueError('''Unsupported framework''')
return masked_index
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : GenericTensor):
'''simple docstring'''
__a = self.get_masked_index(__SCREAMING_SNAKE_CASE)
__a = np.prod(masked_index.shape)
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def _lowerCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : GenericTensor):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0])
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
if return_tensors is None:
__a = self.framework
__a = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE)
self.ensure_exactly_one_mask_token(__SCREAMING_SNAKE_CASE)
return model_inputs
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = self.model(**__SCREAMING_SNAKE_CASE)
__a = model_inputs['''input_ids''']
return model_outputs
def _lowerCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=5 , __SCREAMING_SNAKE_CASE : str=None):
'''simple docstring'''
if target_ids is not None and target_ids.shape[0] < top_k:
__a = target_ids.shape[0]
__a = model_outputs['''input_ids'''][0]
__a = model_outputs['''logits''']
if self.framework == "tf":
__a = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
__a = outputs.numpy()
__a = outputs[0, masked_index, :]
__a = stable_softmax(__SCREAMING_SNAKE_CASE , axis=-1)
if target_ids is not None:
__a = tf.gather_nd(tf.squeeze(__SCREAMING_SNAKE_CASE , 0) , target_ids.reshape(-1 , 1))
__a = tf.expand_dims(__SCREAMING_SNAKE_CASE , 0)
__a = tf.math.top_k(__SCREAMING_SNAKE_CASE , k=__SCREAMING_SNAKE_CASE)
__a , __a = topk.values.numpy(), topk.indices.numpy()
else:
__a = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=__SCREAMING_SNAKE_CASE).squeeze(-1)
# Fill mask pipeline supports only one ${mask_token} per sample
__a = outputs[0, masked_index, :]
__a = logits.softmax(dim=-1)
if target_ids is not None:
__a = probs[..., target_ids]
__a , __a = probs.topk(__SCREAMING_SNAKE_CASE)
__a = []
__a = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist())):
__a = []
for v, p in zip(_values , _predictions):
# Copy is important since we're going to modify this array in place
__a = input_ids.numpy().copy()
if target_ids is not None:
__a = target_ids[p].tolist()
__a = p
# Filter padding out:
__a = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
__a = self.tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE)
__a = {'''score''': v, '''token''': p, '''token_str''': self.tokenizer.decode([p]), '''sequence''': sequence}
row.append(__SCREAMING_SNAKE_CASE)
result.append(__SCREAMING_SNAKE_CASE)
if single_mask:
return result[0]
return result
def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple=None):
'''simple docstring'''
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = [targets]
try:
__a = self.tokenizer.get_vocab()
except Exception:
__a = {}
__a = []
for target in targets:
__a = vocab.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if id_ is None:
__a = self.tokenizer(
__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE , max_length=1 , truncation=__SCREAMING_SNAKE_CASE , )['''input_ids''']
if len(__SCREAMING_SNAKE_CASE) == 0:
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
'''We cannot replace it with anything meaningful, ignoring it''')
continue
__a = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`.')
target_ids.append(id_)
__a = list(set(__SCREAMING_SNAKE_CASE))
if len(__SCREAMING_SNAKE_CASE) == 0:
raise ValueError('''At least one target must be provided when passed.''')
__a = np.array(__SCREAMING_SNAKE_CASE)
return target_ids
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=None):
'''simple docstring'''
__a = {}
if targets is not None:
__a = self.get_target_ids(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
__a = target_ids
if top_k is not None:
__a = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''')
return {}, {}, postprocess_params
def __call__( self : Optional[int] , __SCREAMING_SNAKE_CASE : int , *__SCREAMING_SNAKE_CASE : str , **__SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
__a = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and len(__SCREAMING_SNAKE_CASE) == 1:
return outputs[0]
return outputs
| 60 |
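The code column above is the `transformers` fill-mask pipeline, whose postprocess step honors `top_k` and `targets`. A typical call, assuming the `bert-base-uncased` checkpoint can be downloaded:

from transformers import pipeline

unmasker = pipeline("fill-mask", model="bert-base-uncased")

# top_k controls how many candidate tokens come back per mask
for pred in unmasker("Paris is the [MASK] of France.", top_k=3):
    print(pred["token_str"], round(pred["score"], 4))

# targets restricts scoring to the given tokens; tokens missing from the
# vocab are tokenized and their first sub-token is used, with a warning
print(unmasker("Paris is the [MASK] of France.", targets=["capital", "heart"]))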
from __future__ import annotations
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
print(f'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(_UpperCAmelCase ):
print(f'{i}\t\t{d}' )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
for j in range(_UpperCAmelCase ):
__a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
return True
return False
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = [float('''inf''' )] * vertex_count
__a = 0.0
for _ in range(vertex_count - 1 ):
for j in range(_UpperCAmelCase ):
__a , __a , __a = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('''inf''' ) and distance[u] + w < distance[v]:
__a = distance[u] + w
__a = check_negative_cycle(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
if negative_cycle_exists:
raise Exception('''Negative cycle found''' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case :Dict = int(input('''Enter number of vertices: ''').strip())
__snake_case :Any = int(input('''Enter number of edges: ''').strip())
__snake_case :list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
__snake_case ,__snake_case ,__snake_case :int = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
__snake_case :Any = {'''src''': src, '''dst''': dest, '''weight''': weight}
__snake_case :List[str] = int(input('''\nEnter shortest path source:''').strip())
__snake_case :Optional[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 60 | 1 |
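The Bellman-Ford routine above relaxes every edge V-1 times and runs one extra pass to detect negative cycles. A compact de-obfuscated sketch over a small hand-made edge list (names and data are illustrative):

def bellman_ford(edges, vertex_count, source):
    """Relax every edge V-1 times; one more improving pass means a negative cycle.

    `edges` is a list of (src, dst, weight) tuples, mirroring the dicts used
    by the snippet above.
    """
    INF = float("inf")
    distance = [INF] * vertex_count
    distance[source] = 0.0
    for _ in range(vertex_count - 1):
        for u, v, w in edges:
            if distance[u] != INF and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    # Detection pass: any further relaxation implies a negative cycle
    for u, v, w in edges:
        if distance[u] != INF and distance[u] + w < distance[v]:
            raise ValueError("Negative cycle found")
    return distance

edges = [(0, 1, 4), (0, 2, 1), (2, 1, 2), (1, 3, 1), (2, 3, 5)]
print(bellman_ford(edges, 4, 0))  # [0.0, 3, 1, 4]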
def __snake_case ( _UpperCAmelCase ):
return credit_card_number.startswith(('''34''', '''35''', '''37''', '''4''', '''5''', '''6''') )
def __snake_case ( _UpperCAmelCase ):
__a = credit_card_number
__a = 0
__a = len(_UpperCAmelCase ) - 2
for i in range(_UpperCAmelCase , -1 , -2 ):
# double the value of every second digit
__a = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
__a = cc_number[:i] + str(_UpperCAmelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCAmelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def __snake_case ( _UpperCAmelCase ):
__a = f'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(f'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_UpperCAmelCase ) <= 16:
print(f'{error_message} of its length.' )
return False
if not validate_initial_digits(_UpperCAmelCase ):
print(f'{error_message} of its first two digits.' )
return False
if not luhn_validation(_UpperCAmelCase ):
print(f'{error_message} it fails the Luhn check.' )
return False
print(f'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('''4111111111111111''')
validate_credit_card_number('''32323''')
| 60 |
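The validator above combines a prefix check, a length check, and the Luhn checksum. A worked Luhn check for the same numbers the script tests; for a doubled digit in 10..18, subtracting 9 gives its digit sum, which is exactly what the `digit %= 10; digit += 1` trick above computes:

def luhn_ok(number: str) -> bool:
    """Luhn checksum: double every second digit from the right, sum the digit sums."""
    total = 0
    for offset, ch in enumerate(reversed(number)):
        digit = int(ch)
        if offset % 2 == 1:        # every second digit from the right
            digit *= 2
            if digit > 9:          # 10..18 -> digit sum equals digit - 9
                digit -= 9
        total += digit
    return total % 10 == 0

print(luhn_ok("4111111111111111"))  # True  (the valid number used above)
print(luhn_ok("4111111111111112"))  # False (last digit off by one)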
import os
import sys
import unittest
__snake_case :Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__snake_case :List[str] = os.path.join(git_repo_path, '''src''', '''transformers''')
__snake_case :Any = '''
{0} = None
'''
__snake_case :Dict = '''
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
'''
__snake_case :str = '''
def {0}(*args, **kwargs):
requires_backends({0}, {1})
'''
class _A ( unittest.TestCase ):
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
__a = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''')
self.assertIsNone(__SCREAMING_SNAKE_CASE)
__a = find_backend(''' if not is_tokenizers_available():''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''tokenizers''')
__a = find_backend(''' if not is_tensorflow_text_available():''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''tensorflow_text''')
__a = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers''')
__a = find_backend(
''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tensorflow_text''')
__a = find_backend(
''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''sentencepiece_and_tokenizers_and_vision''')
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('''torch''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''tensorflow_text''' , __SCREAMING_SNAKE_CASE)
self.assertIn('''sentencepiece_and_tokenizers''' , __SCREAMING_SNAKE_CASE)
# Likewise, we can't assert on the exact content of a key
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertModel''' , objects['''tf'''])
self.assertIn('''FlaxBertModel''' , objects['''flax'''])
self.assertIn('''BertModel''' , objects['''torch'''])
self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''])
self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''])
def _lowerCamelCase ( self : Any):
'''simple docstring'''
__a = create_dummy_object('''CONSTANT''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , '''\nCONSTANT = None\n''')
__a = create_dummy_object('''function''' , '''\'torch\'''')
self.assertEqual(
__SCREAMING_SNAKE_CASE , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''')
__a = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
'''
__a = create_dummy_object('''FakeClass''' , '''\'torch\'''')
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__a = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
'''
__a = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']})
self.assertEqual(dummy_files['''torch'''] , __SCREAMING_SNAKE_CASE)
| 60 | 1 |
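The unit test above feeds guarded-import lines to `find_backend` and expects backend strings like `sentencepiece_and_tokenizers`. A simplified regex sketch of that kind of scan (illustrative only; not the `check_dummies` implementation):

import re

_SINGLE_BACKEND = re.compile(r"^\s*if not is_(\w+)_available\(\):")
_MULTI_BACKEND = re.compile(r"^\s*if not \((.+)\):")

def find_backend_sketch(line: str):
    """Return the backend string a guarded import block depends on, or None."""
    match = _SINGLE_BACKEND.search(line)
    if match:
        return match.group(1)
    match = _MULTI_BACKEND.search(line)
    if match:
        # "is_a_available() and is_b_available()" -> "a_and_b"
        names = re.findall(r"is_(\w+)_available\(\)", match.group(1))
        return "_and_".join(names) if names else None
    return None

print(find_backend_sketch("    if not is_tokenizers_available():"))
# -> tokenizers
print(find_backend_sketch("    if not (is_sentencepiece_available() and is_tokenizers_available()):"))
# -> sentencepiece_and_tokenizers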
import math
def __snake_case ( _UpperCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k ± 1
for i in range(5 , int(math.sqrt(_UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def __snake_case ( _UpperCAmelCase = 10001 ):
try:
__a = int(_UpperCAmelCase )
except (TypeError, ValueError):
raise TypeError('''Parameter nth must be int or castable to int.''' ) from None
if nth <= 0:
raise ValueError('''Parameter nth must be greater than or equal to one.''' )
__a = []
__a = 2
while len(_UpperCAmelCase ) < nth:
if is_prime(_UpperCAmelCase ):
primes.append(_UpperCAmelCase )
num += 1
else:
num += 1
return primes[len(_UpperCAmelCase ) - 1]
if __name__ == "__main__":
print(f'{solution() = }')
| 60 |
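The Project Euler #7 snippet above tests primality by trial division over 6k ± 1 candidates only. A quick standalone sanity check, relying on the standard result that the 10,001st prime is 104743:

def is_prime(n: int) -> bool:
    if n < 2:
        return False
    if n < 4:
        return True              # 2 and 3
    if n % 2 == 0 or n % 3 == 0:
        return False
    i = 5
    while i * i <= n:            # only 6k - 1 and 6k + 1 candidates remain
        if n % i == 0 or n % (i + 2) == 0:
            return False
        i += 6
    return True

count, num = 0, 1
while count < 10001:
    num += 1
    if is_prime(num):
        count += 1
print(num)  # 104743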
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__snake_case :str = get_logger()
__snake_case :Optional[dict] = None
class _A ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : int=None , __SCREAMING_SNAKE_CASE : List[Any]=None , **__SCREAMING_SNAKE_CASE : Any):
'''simple docstring'''
super().__init__(features=__SCREAMING_SNAKE_CASE)
import jax
from jaxlib.xla_client import Device
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
raise ValueError(
F'Expected {device} to be a `str` not {type(__SCREAMING_SNAKE_CASE)}, as `jaxlib.xla_extension.Device` '
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''')
__a = device if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) else str(jax.devices()[0])
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or `dill`,
        # so the device mapping is kept in a module-level global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__a = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys()):
logger.warning(
F'Device with string identifier {self.device} not listed among the available '
F'devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default '
F'device: {str(jax.devices()[0])}.')
__a = str(jax.devices()[0])
__a = jnp_array_kwargs
@staticmethod
def _lowerCamelCase ( ):
'''simple docstring'''
import jax
return {str(__SCREAMING_SNAKE_CASE): device for device in jax.devices()}
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE) and column:
if all(
isinstance(__SCREAMING_SNAKE_CASE , jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column):
return jnp.stack(__SCREAMING_SNAKE_CASE , axis=0)
return column
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__SCREAMING_SNAKE_CASE , (str, bytes, type(__SCREAMING_SNAKE_CASE))):
return value
elif isinstance(__SCREAMING_SNAKE_CASE , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
__a = {}
if isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                __a = {'''dtype''': jnp.int64}
            else:
                __a = {'''dtype''': jnp.int32}
        elif isinstance(__SCREAMING_SNAKE_CASE , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
            __a = {'''dtype''': jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__SCREAMING_SNAKE_CASE , PIL.Image.Image):
__a = np.asarray(__SCREAMING_SNAKE_CASE)
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or `dill`,
        # so the device mapping is kept in a module-level global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
__a = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device]):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__SCREAMING_SNAKE_CASE , **{**default_dtype, **self.jnp_array_kwargs})
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__SCREAMING_SNAKE_CASE , torch.Tensor):
return self._tensorize(data_struct.detach().cpu().numpy()[()])
if hasattr(__SCREAMING_SNAKE_CASE , '''__array__''') and not isinstance(__SCREAMING_SNAKE_CASE , jax.Array):
__a = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__SCREAMING_SNAKE_CASE , np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct])
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)):
return self._consolidate([self.recursive_tensorize(__SCREAMING_SNAKE_CASE) for substruct in data_struct])
return self._tensorize(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : dict):
'''simple docstring'''
return map_nested(self._recursive_tensorize , __SCREAMING_SNAKE_CASE , map_list=__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : str , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
__a = self.numpy_arrow_extractor().extract_row(__SCREAMING_SNAKE_CASE)
__a = self.python_features_decoder.decode_row(__SCREAMING_SNAKE_CASE)
return self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
__a = self.numpy_arrow_extractor().extract_column(__SCREAMING_SNAKE_CASE)
__a = self.python_features_decoder.decode_column(__SCREAMING_SNAKE_CASE , pa_table.column_names[0])
__a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
__a = self._consolidate(__SCREAMING_SNAKE_CASE)
return column
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
__a = self.numpy_arrow_extractor().extract_batch(__SCREAMING_SNAKE_CASE)
__a = self.python_features_decoder.decode_batch(__SCREAMING_SNAKE_CASE)
__a = self.recursive_tensorize(__SCREAMING_SNAKE_CASE)
for column_name in batch:
__a = self._consolidate(batch[column_name])
return batch
| 60 | 1 |
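The formatter above chooses integer precision from jax's `jax_enable_x64` flag and defaults floats to float32. A minimal sketch of that dtype defaulting, assuming jax is installed (not the `datasets` implementation itself):

import numpy as np
import jax
import jax.numpy as jnp

def to_jnp(value: np.ndarray):
    """Integers follow the x64 flag, floats default to float32 (a sketch)."""
    kwargs = {}
    if np.issubdtype(value.dtype, np.integer):
        # 64-bit ints only when jax has been configured with x64 enabled
        kwargs = {"dtype": jnp.int64 if jax.config.jax_enable_x64 else jnp.int32}
    elif np.issubdtype(value.dtype, np.floating):
        kwargs = {"dtype": jnp.float32}
    return jnp.array(value, **kwargs)

print(to_jnp(np.arange(3)).dtype)   # int32 unless x64 is enabled
print(to_jnp(np.ones(3)).dtype)     # float32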
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__snake_case :List[Any] = datasets.utils.logging.get_logger(__name__)
__snake_case :Dict = ['''names''', '''prefix''']
__snake_case :Tuple = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
__snake_case :Tuple = ['''encoding_errors''', '''on_bad_lines''']
__snake_case :int = ['''date_format''']
@dataclass
class _A ( datasets.BuilderConfig ):
UpperCamelCase__ : str = ","
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[Union[int, List[int], str]] = "infer"
UpperCamelCase__ : Optional[List[str]] = None
UpperCamelCase__ : Optional[List[str]] = None
UpperCamelCase__ : Optional[Union[int, str, List[int], List[str]]] = None
UpperCamelCase__ : Optional[Union[List[int], List[str]]] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[Literal["c", "python", "pyarrow"]] = None
UpperCamelCase__ : Dict[Union[int, str], Callable[[Any], Any]] = None
UpperCamelCase__ : Optional[list] = None
UpperCamelCase__ : Optional[list] = None
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[Union[int, List[int]]] = None
UpperCamelCase__ : Optional[int] = None
UpperCamelCase__ : Optional[Union[str, List[str]]] = None
UpperCamelCase__ : bool = True
UpperCamelCase__ : bool = True
UpperCamelCase__ : bool = False
UpperCamelCase__ : bool = True
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : str = "."
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : str = '"'
UpperCamelCase__ : int = 0
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : bool = True
UpperCamelCase__ : bool = True
UpperCamelCase__ : int = 0
UpperCamelCase__ : bool = True
UpperCamelCase__ : bool = False
UpperCamelCase__ : Optional[str] = None
UpperCamelCase__ : int = 10_000
UpperCamelCase__ : Optional[datasets.Features] = None
UpperCamelCase__ : Optional[str] = "strict"
UpperCamelCase__ : Literal["error", "warn", "skip"] = "error"
UpperCamelCase__ : Optional[str] = None
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
if self.delimiter is not None:
__a = self.delimiter
if self.column_names is not None:
__a = self.column_names
@property
def _lowerCamelCase ( self : int):
'''simple docstring'''
__a = {
'''sep''': self.sep,
'''header''': self.header,
'''names''': self.names,
'''index_col''': self.index_col,
'''usecols''': self.usecols,
'''prefix''': self.prefix,
'''mangle_dupe_cols''': self.mangle_dupe_cols,
'''engine''': self.engine,
'''converters''': self.converters,
'''true_values''': self.true_values,
'''false_values''': self.false_values,
'''skipinitialspace''': self.skipinitialspace,
'''skiprows''': self.skiprows,
'''nrows''': self.nrows,
'''na_values''': self.na_values,
'''keep_default_na''': self.keep_default_na,
'''na_filter''': self.na_filter,
'''verbose''': self.verbose,
'''skip_blank_lines''': self.skip_blank_lines,
'''thousands''': self.thousands,
'''decimal''': self.decimal,
'''lineterminator''': self.lineterminator,
'''quotechar''': self.quotechar,
'''quoting''': self.quoting,
'''escapechar''': self.escapechar,
'''comment''': self.comment,
'''encoding''': self.encoding,
'''dialect''': self.dialect,
'''error_bad_lines''': self.error_bad_lines,
'''warn_bad_lines''': self.warn_bad_lines,
'''skipfooter''': self.skipfooter,
'''doublequote''': self.doublequote,
'''memory_map''': self.memory_map,
'''float_precision''': self.float_precision,
'''chunksize''': self.chunksize,
'''encoding_errors''': self.encoding_errors,
'''on_bad_lines''': self.on_bad_lines,
'''date_format''': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , __SCREAMING_SNAKE_CASE):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class _A ( datasets.ArrowBasedBuilder ):
UpperCamelCase__ : List[Any] = CsvConfig
def _lowerCamelCase ( self : List[Any]):
'''simple docstring'''
return datasets.DatasetInfo(features=self.config.features)
def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : str):
'''simple docstring'''
if not self.config.data_files:
raise ValueError(F'At least one data file must be specified, but got data_files={self.config.data_files}')
__a = dl_manager.download_and_extract(self.config.data_files)
if isinstance(__SCREAMING_SNAKE_CASE , (str, list, tuple)):
__a = data_files
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = [files]
__a = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files})]
__a = []
for split_name, files in data_files.items():
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = [files]
__a = [dl_manager.iter_files(__SCREAMING_SNAKE_CASE) for file in files]
splits.append(datasets.SplitGenerator(name=__SCREAMING_SNAKE_CASE , gen_kwargs={'''files''': files}))
return splits
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : pa.Table):
'''simple docstring'''
if self.config.features is not None:
__a = self.config.features.arrow_schema
if all(not require_storage_cast(__SCREAMING_SNAKE_CASE) for feature in self.config.features.values()):
# cheaper cast
__a = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=__SCREAMING_SNAKE_CASE)
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
__a = table_cast(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
return pa_table
def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Optional[Any]):
'''simple docstring'''
__a = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
__a = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(__SCREAMING_SNAKE_CASE) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values())
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(__SCREAMING_SNAKE_CASE)):
__a = pd.read_csv(__SCREAMING_SNAKE_CASE , iterator=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE , **self.config.pd_read_csv_kwargs)
try:
for batch_idx, df in enumerate(__SCREAMING_SNAKE_CASE):
__a = pa.Table.from_pandas(__SCREAMING_SNAKE_CASE)
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__SCREAMING_SNAKE_CASE)
except ValueError as e:
logger.error(F'Failed to read file \'{file}\' with error {type(__SCREAMING_SNAKE_CASE)}: {e}')
raise
| 60 |
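The CSV builder above streams each file through pandas in chunks and converts every chunk to a `pyarrow` table. The same loop in isolation, with a hypothetical local `data.csv` and the builder's default chunk size of 10,000 rows:

import pandas as pd
import pyarrow as pa

# `data.csv` is a placeholder path for illustration
reader = pd.read_csv("data.csv", iterator=True, chunksize=10_000)
for batch_idx, df in enumerate(reader):
    pa_table = pa.Table.from_pandas(df)   # one Arrow table per pandas chunk
    print(batch_idx, pa_table.num_rows, pa_table.schema.names)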
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__snake_case :Tuple = logging.getLogger(__name__)
if __name__ == "__main__":
__snake_case :Union[str, Any] = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_0522, type=int)
__snake_case :List[str] = parser.parse_args()
logger.info(f'Loading data from {args.data_file}')
with open(args.data_file, '''rb''') as fp:
__snake_case :Optional[Any] = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
__snake_case :Dict = Counter()
for tk_ids in data:
counter.update(tk_ids)
__snake_case :Optional[Any] = [0] * args.vocab_size
for k, v in counter.items():
__snake_case :Any = v
logger.info(f'Dump to {args.token_counts_dump}')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 60 | 1 |
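The token-count script above reduces to a few lines around `collections.Counter`; the paths below mirror the script's defaults and are placeholders:

import pickle
from collections import Counter

with open("data/dump.bert-base-uncased.pickle", "rb") as fp:
    data = pickle.load(fp)          # list of token-id sequences

counter = Counter()
for token_ids in data:
    counter.update(token_ids)

vocab_size = 30522                  # bert-base-uncased vocabulary size
counts = [counter[token_id] for token_id in range(vocab_size)]

with open("data/token_counts.bert-base-uncased.pickle", "wb") as handle:
    pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)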