import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    """Read a big-endian uint32 from the byte stream."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name)
            )
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name)
            )
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels


class _DataSet:
    """Container holding images and labels of a dataset (deprecated)."""

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]

            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2]
                )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]


@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download `filename` from `source_url` into `work_directory`, unless it is already there."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')"
)
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed
            )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file
    )
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
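
# Hedged usage sketch (added for illustration; not part of the original module).
# "mnist_data" is an assumed local directory; the call downloads the four MNIST
# archives into it on first use, so this needs network access.
if __name__ == "__main__":
    datasets = read_data_sets("mnist_data", one_hot=True)
    batch_images, batch_labels = datasets.train.next_batch(64)
    print(batch_images.shape, batch_labels.shape)  # expected: (64, 784) (64, 10)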
---

def simplify(current_set: list[list]) -> list[list]:
    """Normalize each row by its leading coefficient, cancel the leading term,
    then recurse on the remaining sub-system."""
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set


def solve_simultaneous(equations: list[list]) -> list:
    """Solve a system of n linear equations given as n lists of n + 1 numbers
    (coefficients followed by the constant term); returns the solutions in
    variable order."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
---
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(
            generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
---
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
    "processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_text_dual_encoder"] = ["VisionTextDualEncoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_text_dual_encoder"] = ["FlaxVisionTextDualEncoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_text_dual_encoder"] = ["TFVisionTextDualEncoderModel"]


if TYPE_CHECKING:
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
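
# Hedged note (added for illustration; not part of the original file): once
# _LazyModule replaces this module in sys.modules, a downstream import such as
#
#     from transformers.models.vision_text_dual_encoder import VisionTextDualEncoderModel
#
# only triggers the actual submodule import on first attribute access; the
# mapping in _import_structure tells the lazy proxy where each name lives.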
---
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 1000 ) -> int:
_lowerCAmelCase , _lowerCAmelCase : Tuple = 1, 1
_lowerCAmelCase : Union[str, Any] = []
for i in range(1 ,n + 1 ):
_lowerCAmelCase : Union[str, Any] = prev_numerator + 2 * prev_denominator
_lowerCAmelCase : Optional[Any] = prev_numerator + prev_denominator
if len(str(_lowerCamelCase ) ) > len(str(_lowerCamelCase ) ):
result.append(_lowerCamelCase )
_lowerCAmelCase : List[str] = numerator
_lowerCAmelCase : Optional[int] = denominator
return len(_lowerCamelCase )
if __name__ == "__main__":
print(F"""{solution() = }""")
---

"""A simplified emulation of the WWII Enigma cipher machine."""
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]

# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N",
    "N": "A",
    "B": "O",
    "O": "B",
    "C": "P",
    "P": "C",
    "D": "Q",
    "Q": "D",
    "E": "R",
    "R": "E",
    "F": "S",
    "S": "F",
    "G": "T",
    "T": "G",
    "H": "U",
    "U": "H",
    "I": "V",
    "V": "I",
    "J": "W",
    "W": "J",
    "K": "X",
    "X": "K",
    "L": "Y",
    "Y": "L",
    "M": "Z",
    "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Check that rotor selection, rotor positions and plugboard setting are valid."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        msg = f"Please use 3 unique rotors (not {unique_rotsel})"
        raise Exception(msg)

    # Checks if rotor positions are valid
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        msg = f"First rotor position is not within range of 1..26 ({rotorpos1})"
        raise ValueError(msg)
    if not 0 < rotorpos2 <= len(abc):
        msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})"
        raise ValueError(msg)
    if not 0 < rotorpos3 <= len(abc):
        msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})"
        raise ValueError(msg)

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Turn a plugboard setting such as "ABCD" into a symmetric substitution dict."""
    # tests the input string if it
    # a) is type string
    # b) has even length (so pairs can be made)
    if not isinstance(pbstring, str):
        msg = f"Plugboard setting isn't type string ({type(pbstring)})"
        raise TypeError(msg)
    elif len(pbstring) % 2 != 0:
        msg = f"Odd number of symbols ({len(pbstring)})"
        raise Exception(msg)
    elif pbstring == "":
        return {}

    pbstring = pbstring.replace(" ", "")

    # Checks if all characters are unique
    tmppbl = set()
    for i in pbstring:
        if i not in abc:
            msg = f"'{i}' not in list of symbols"
            raise Exception(msg)
        elif i in tmppbl:
            msg = f"Duplicate symbol ({i})"
            raise Exception(msg)
        else:
            tmppbl.add(i)
    del tmppbl

    # Created the dictionary
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]

    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encipher `text`; running the output back through with the same settings deciphers it."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(
        rotor_position, rotor_selection, plugb.upper()
    )

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
            rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
            rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # else:
        #    pass
        #    Error could be also raised
        #    raise ValueError(
        #        'Invalid symbol('+repr(symbol)+')')

        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
---
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_autoformer"] = [
        "AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AutoformerForPrediction",
        "AutoformerModel",
        "AutoformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_autoformer import (
        AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AutoformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_autoformer import (
            AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            AutoformerForPrediction,
            AutoformerModel,
            AutoformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
---
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image


authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    """Create one dummy input (text, image or audio) per requested type."""
    inputs = []

    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")

    return inputs


def output_types(outputs: List):
    """Map each produced output object back to its agent type name."""
    output_types = []

    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")

    return output_types


@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))

        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_types_inputs(self):
        inputs = create_inputs(self.tool.inputs)

        _inputs = []

        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))

        # Should not raise an error
        outputs = self.tool(*inputs)

        if not isinstance(outputs, list):
            outputs = [outputs]

        self.assertEqual(len(outputs), len(self.tool.outputs))
---
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `steps` rounds of the Koch snowflake construction to the vector list."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace each edge by four edges, raising an equilateral bump on its middle third."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counter-clockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the snowflake with equal axis scaling to avoid a stretched display."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
---

def odd_even_sort(input_list: list) -> list:
    """Sort a list in place using the odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False

        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                # swapping if elements not in order
                is_sorted = False
    return input_list


if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
---
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFFunnelBaseModel,
        TFFunnelForMaskedLM,
        TFFunnelForMultipleChoice,
        TFFunnelForPreTraining,
        TFFunnelForQuestionAnswering,
        TFFunnelForSequenceClassification,
        TFFunnelForTokenClassification,
        TFFunnelModel,
    )


class TFFunnelModelTester:
    """Builds a tiny Funnel config plus random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)


@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
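
# Hedged note (added): these classes are normally collected by the transformers
# test suite rather than run directly; a typical local invocation might be
#     python -m pytest tests/models/funnel/test_modeling_tf_funnel.py -q
# (the path is assumed from the usual repository layout).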
---
def reverse_long_words(sentence: str) -> str:
    """
    Reverse all words that are longer than 4 characters in a sentence.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
---
import os
import unittest

from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer

from ...test_tokenization_common import TokenizerTesterMixin


class PhobertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PhobertTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["T@@", "i", "I", "R@@", "r", "e@@"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l à</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])

        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return PhobertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "Tôi là VinAI Research"
        output_text = "T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = PhobertTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "Tôi là VinAI Research"
        bpe_tokens = "T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h".split()
        tokens = tokenizer.tokenize(text)
        print(tokens)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
---
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase: Tuple =logging.get_logger(__name__)
lowerCAmelCase: int =[
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def __snake_case ( __A ) -> Dict:
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
lowercase : Any = k.replace(__A ,__A )
if k.startswith("""encoder""" ):
lowercase : List[str] = k.replace(""".attn""" ,""".self_attn""" )
lowercase : Union[str, Any] = k.replace("""norm1""" ,"""self_attn_layer_norm""" )
lowercase : List[Any] = k.replace("""norm2""" ,"""final_layer_norm""" )
elif k.startswith("""decoder""" ):
lowercase : Union[str, Any] = k.replace("""norm1""" ,"""self_attn_layer_norm""" )
lowercase : Tuple = k.replace("""norm2""" ,"""encoder_attn_layer_norm""" )
lowercase : Dict = k.replace("""norm3""" ,"""final_layer_norm""" )
return k
def __snake_case ( __A ) -> Dict:
lowercase : Optional[int] = [
"""model.encoder.layernorm_embedding.weight""",
"""model.encoder.layernorm_embedding.bias""",
"""model.decoder.layernorm_embedding.weight""",
"""model.decoder.layernorm_embedding.bias""",
]
for k in keys:
lowercase : Union[str, Any] = sd.pop(__A )
lowercase : Optional[int] = k.replace("""layernorm_embedding""" ,"""layer_norm""" )
assert new_k not in sd
lowercase : List[Any] = v
lowerCAmelCase: Union[str, Any] =["START"]
@torch.no_grad()
def __snake_case ( __A ,__A ,__A ) -> int:
lowercase : Union[str, Any] = torch.load(__A ,map_location="""cpu""" )
lowercase : Optional[Any] = model["""model"""]
lowercase : Union[str, Any] = BlenderbotConfig.from_json_file(__A )
lowercase : Optional[Any] = BlenderbotForConditionalGeneration(__A )
lowercase : List[str] = m.model.state_dict().keys()
lowercase : Optional[Any] = []
lowercase : Optional[int] = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
lowercase : str = rename_state_dict_key(__A )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
lowercase : Dict = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(__A )
m.model.load_state_dict(__A ,strict=__A )
m.half()
m.save_pretrained(__A )
if __name__ == "__main__":
lowerCAmelCase: Optional[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
lowerCAmelCase: str =parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
---
"""simple docstring"""
from random import randint, random
def UpperCAmelCase__ (snake_case__ : int , snake_case__ : int , snake_case__ : int , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : int = 5 , ):
"""simple docstring"""
_snake_case : Dict = [[-1] * number_of_cells] # Create a highway without any car
_snake_case : List[Any] = 0
_snake_case : Any = max(snake_case__ , 0 )
while i < number_of_cells:
_snake_case : Optional[int] = (
randint(0 , snake_case__ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def UpperCAmelCase__ (snake_case__ : list , snake_case__ : int ):
"""simple docstring"""
_snake_case : Optional[int] = 0
_snake_case : Dict = highway_now[car_index + 1 :]
for cell in range(len(snake_case__ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# Here if the car is near the end of the highway
return distance + get_distance(snake_case__ , -1 )
def UpperCAmelCase__ (snake_case__ : list , snake_case__ : float , snake_case__ : int ):
"""simple docstring"""
_snake_case : Tuple = len(snake_case__ )
# Beforce calculations, the highway is empty
_snake_case : Optional[int] = [-1] * number_of_cells
for car_index in range(snake_case__ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_snake_case : Tuple = min(highway_now[car_index] + 1 , snake_case__ )
# Number of empty cell before the next car
_snake_case : Union[str, Any] = get_distance(snake_case__ , snake_case__ ) - 1
# We can't have the car causing an accident
_snake_case : List[Any] = min(next_highway[car_index] , snake_case__ )
if random() < probability:
# Randomly, a driver will slow down
_snake_case : int = max(next_highway[car_index] - 1 , 0 )
return next_highway
def UpperCAmelCase__ (snake_case__ : list , snake_case__ : int , snake_case__ : float , snake_case__ : int ):
"""simple docstring"""
_snake_case : str = len(highway[0] )
for i in range(snake_case__ ):
_snake_case : List[Any] = update(highway[i] , snake_case__ , snake_case__ )
_snake_case : Optional[int] = [-1] * number_of_cells
for car_index in range(snake_case__ ):
_snake_case : List[Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_snake_case : Dict = (car_index + speed) % number_of_cells
# Commit the change of position
_snake_case : Optional[Any] = speed
highway.append(snake_case__ )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
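
# Hedged usage sketch (added; not in the original module, parameter values are
# illustrative only):
#
#     highway = construct_highway(number_of_cells=21, frequency=4, initial_speed=0)
#     history = simulate(highway, number_of_update=10, probability=0.3, max_speed=5)
#     for state in history:
#         print(state)  # -1 marks an empty cell, other values are car speeds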
---
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]


if TYPE_CHECKING:
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
---
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
lowerCAmelCase : int = [0] * len(_UpperCAmelCase )
lowerCAmelCase : Any = []
lowerCAmelCase : Optional[Any] = [1] * len(_UpperCAmelCase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_UpperCAmelCase ) ):
if indegree[i] == 0:
queue.append(_UpperCAmelCase )
while queue:
lowerCAmelCase : Union[str, Any] = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCAmelCase : Optional[int] = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(_UpperCAmelCase )
print(max(_UpperCAmelCase ) )
# Adjacency list of Graph
lowerCAmelCase__ = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
---
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
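# Usage sketch (IDs below are made up for illustration): the check letter is
# the lookup table indexed by the 8-digit number modulo 23; here
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z".
#
#     is_spain_national_id("12345678Z")   # True
#     is_spain_national_id("12345678-Z")  # True, dashes are stripped
#     is_spain_national_id("12345678A")   # False, wrong check letter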
| 343 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mobilenet_v2": [
"MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"MobileNetV2Config",
"MobileNetV2OnnxConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
_import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mobilenet_v2"] = [
"MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileNetV2ForImageClassification",
"MobileNetV2ForSemanticSegmentation",
"MobileNetV2Model",
"MobileNetV2PreTrainedModel",
"load_tf_weights_in_mobilenet_v2",
]
if TYPE_CHECKING:
from .configuration_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileNetV2Config,
MobileNetV2OnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_v2 import (
MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileNetV2ForImageClassification,
MobileNetV2ForSemanticSegmentation,
MobileNetV2Model,
MobileNetV2PreTrainedModel,
load_tf_weights_in_mobilenet_v2,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
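# Minimal usage sketch: a 1000-step cosine schedule. Each beta is capped at
# max_beta, and the cumulative product of (1 - beta) tracks the cosine curve:
#
#     betas = betas_for_alpha_bar(1000)
#     alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)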
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: float = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type
    def scale_model_input(self, sample, timestep=None):
        return sample
    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, model_output, timestep, sample, prev_timestep=None, generator=None, return_dict=True):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep,
            )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure alphas_cumprod and timesteps share device and dtype with original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
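# Minimal denoising-loop sketch for the UnCLIPScheduler above; `model` stands
# in for a hypothetical noise-prediction network and is not defined here:
#
#     scheduler = UnCLIPScheduler()
#     scheduler.set_timesteps(25, device="cpu")
#     sample = torch.randn(1, 3, 64, 64)
#     for t in scheduler.timesteps:
#         model_output = model(sample, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample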
| 460 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_albert"""] = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_albert_fast"""] = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_albert"""] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_albert"""] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_flax_albert"""] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 67 |
def get_demo_graph(index: int) -> dict[int, list[int]]:
    """Return one of four sample adjacency lists."""
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the bridges of an undirected graph via Tarjan's low-link values."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
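# Worked example: on the first demo graph the bridges are exactly the edges
# whose removal disconnects the graph, found in DFS order as
#
#     compute_bridges(get_demo_graph(0))  # -> [(3, 4), (2, 3), (2, 5)]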
| 641 | 0 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case_ ( self ):
__a = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" )
__a = AutoTokenizer.from_pretrained("""google/mt5-small""" )
__a = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
__a = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
__a = shift_tokens_right(_a , model.config.pad_token_id , model.config.decoder_start_token_id )
__a = model(_a , decoder_input_ids=_a ).logits
__a = optax.softmax_cross_entropy(_a , onehot(_a , logits.shape[-1] ) ).mean()
__a = -(labels.shape[-1] * loss.item())
__a = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 721 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = 42
class __UpperCAmelCase ( __A , __A ):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 3 , __A = 3 , __A = ("DownEncoderBlock2D",) , __A = ("UpDecoderBlock2D",) , __A = (64,) , __A = 1 , __A = "silu" , __A = 3 , __A = 32 , __A = 256 , __A = 32 , __A = None , __A = 0.18215 , __A = "group" , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , )
__a = vq_embed_dim if vq_embed_dim is not None else latent_channels
__a = nn.Conv2d(__A , __A , 1 )
__a = VectorQuantizer(__A , __A , beta=0.25 , remap=__A , sane_index_shape=__A )
__a = nn.Conv2d(__A , __A , 1 )
# pass init params to Decoder
__a = Decoder(
in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , )
@apply_forward_hook
def snake_case_ ( self , __A , __A = True ):
__a = self.encoder(__A )
__a = self.quant_conv(__A )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__A )
@apply_forward_hook
def snake_case_ ( self , __A , __A = False , __A = True ):
# also go through quantization layer
if not force_not_quantize:
__a , __a , __a = self.quantize(__A )
else:
__a = h
__a = self.post_quant_conv(__A )
__a = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
def snake_case_ ( self , __A , __A = True ):
__a = sample
__a = self.encode(__A ).latents
__a = self.decode(__A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
| 209 | 0 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
_lowerCamelCase : Tuple = Mapping[str, np.ndarray]
_lowerCamelCase : Any = Mapping[str, Any] # Is a nested dict.
_lowerCamelCase : Tuple = 0.01
@dataclasses.dataclass(frozen=SCREAMING_SNAKE_CASE_)
class lowercase :
'''simple docstring'''
UpperCAmelCase : np.ndarray # [num_res, num_atom_type, 3]
# Amino-acid type for each residue represented as an integer between 0 and
# 20, where 20 is 'X'.
UpperCAmelCase : np.ndarray # [num_res]
# Binary float mask to indicate presence of a particular atom. 1.0 if an atom
# is present and 0.0 if not. This should be used for loss masking.
UpperCAmelCase : np.ndarray # [num_res, num_atom_type]
# Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
UpperCAmelCase : np.ndarray # [num_res]
# B-factors, or temperature factors, of each residue (in sq. angstroms units),
# representing the displacement of the residue from its ground truth mean
# value.
UpperCAmelCase : np.ndarray # [num_res, num_atom_type]
# Chain indices for multi-chain predictions
UpperCAmelCase : Optional[np.ndarray] = None
# Optional remark about the protein. Included as a comment in output PDB
# files
UpperCAmelCase : Optional[str] = None
# Templates used to generate this protein (prediction-only)
UpperCAmelCase : Optional[Sequence[str]] = None
# Chain corresponding to each parent
UpperCAmelCase : Optional[Sequence[int]] = None
def __a ( __lowerCAmelCase ) -> Protein:
SCREAMING_SNAKE_CASE : Optional[Any] = r'(\[[A-Z]+\]\n)'
SCREAMING_SNAKE_CASE : List[str] = [tag.strip() for tag in re.split(__lowerCAmelCase , __lowerCAmelCase ) if len(__lowerCAmelCase ) > 0]
SCREAMING_SNAKE_CASE : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('\n' ) for l in tags[1::2]] )
SCREAMING_SNAKE_CASE : List[str] = ["N", "CA", "C"]
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : Optional[Any] = None
SCREAMING_SNAKE_CASE : Tuple = None
for g in groups:
if "[PRIMARY]" == g[0]:
SCREAMING_SNAKE_CASE : List[str] = g[1][0].strip()
for i in range(len(__lowerCAmelCase ) ):
if seq[i] not in residue_constants.restypes:
SCREAMING_SNAKE_CASE : str = 'X' # FIXME: strings are immutable
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(
[residue_constants.restype_order.get(__lowerCAmelCase , residue_constants.restype_num ) for res_symbol in seq] )
elif "[TERTIARY]" == g[0]:
SCREAMING_SNAKE_CASE : List[List[float]] = []
for axis in range(3 ):
tertiary.append(list(map(__lowerCAmelCase , g[1][axis].split() ) ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Dict = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
for i, atom in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : int = np.transpose(tertiary_np[:, i::3] )
atom_positions *= PICO_TO_ANGSTROM
elif "[MASK]" == g[0]:
SCREAMING_SNAKE_CASE : str = np.array(list(map({'-': 0, '+': 1}.get , g[1][0].strip() ) ) )
SCREAMING_SNAKE_CASE : Optional[Any] = np.zeros(
(
len(__lowerCAmelCase ),
residue_constants.atom_type_num,
) ).astype(np.floataa )
for i, atom in enumerate(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : str = 1
atom_mask *= mask[..., None]
assert aatype is not None
return Protein(
atom_positions=__lowerCAmelCase , atom_mask=__lowerCAmelCase , aatype=__lowerCAmelCase , residue_index=np.arange(len(__lowerCAmelCase ) ) , b_factors=__lowerCAmelCase , )
def __a ( __lowerCAmelCase , __lowerCAmelCase = 0 ) -> List[str]:
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Any = prot.remark
if remark is not None:
pdb_headers.append(F'''REMARK {remark}''' )
SCREAMING_SNAKE_CASE : List[str] = prot.parents
SCREAMING_SNAKE_CASE : int = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
SCREAMING_SNAKE_CASE : Any = [p for i, p in zip(__lowerCAmelCase , __lowerCAmelCase ) if i == chain_id]
if parents is None or len(__lowerCAmelCase ) == 0:
SCREAMING_SNAKE_CASE : int = ['N/A']
pdb_headers.append(F'''PARENT {' '.join(__lowerCAmelCase )}''' )
return pdb_headers
def __a ( __lowerCAmelCase , __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Union[str, Any] = pdb_str.split('\n' )
SCREAMING_SNAKE_CASE : Dict = prot.remark
if remark is not None:
out_pdb_lines.append(F'''REMARK {remark}''' )
SCREAMING_SNAKE_CASE : List[List[str]]
if prot.parents is not None and len(prot.parents ) > 0:
SCREAMING_SNAKE_CASE : List[Any] = []
if prot.parents_chain_index is not None:
SCREAMING_SNAKE_CASE : Dict[str, List[str]] = {}
for p, i in zip(prot.parents , prot.parents_chain_index ):
parent_dict.setdefault(str(__lowerCAmelCase ) , [] )
parent_dict[str(__lowerCAmelCase )].append(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : int = max([int(__lowerCAmelCase ) for chain_idx in parent_dict] )
for i in range(max_idx + 1 ):
SCREAMING_SNAKE_CASE : Union[str, Any] = parent_dict.get(str(__lowerCAmelCase ) , ['N/A'] )
parents_per_chain.append(__lowerCAmelCase )
else:
parents_per_chain.append(list(prot.parents ) )
else:
SCREAMING_SNAKE_CASE : Dict = [['N/A']]
def make_parent_line(__lowerCAmelCase ) -> str:
return F'''PARENT {' '.join(__lowerCAmelCase )}'''
out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
SCREAMING_SNAKE_CASE : Optional[Any] = 0
for i, l in enumerate(__lowerCAmelCase ):
if "PARENT" not in l and "REMARK" not in l:
out_pdb_lines.append(__lowerCAmelCase )
if "TER" in l and "END" not in lines[i + 1]:
chain_counter += 1
if not chain_counter >= len(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : str = parents_per_chain[chain_counter]
else:
SCREAMING_SNAKE_CASE : Tuple = ['N/A']
out_pdb_lines.append(make_parent_line(__lowerCAmelCase ) )
return "\n".join(__lowerCAmelCase )
def __a ( __lowerCAmelCase ) -> str:
SCREAMING_SNAKE_CASE : List[Any] = residue_constants.restypes + ['X']
def res_atoa(__lowerCAmelCase ) -> str:
return residue_constants.restype_atoa.get(restypes[r] , 'UNK' )
SCREAMING_SNAKE_CASE : List[str] = residue_constants.atom_types
SCREAMING_SNAKE_CASE : List[str] = []
SCREAMING_SNAKE_CASE : Dict = prot.atom_mask
SCREAMING_SNAKE_CASE : List[str] = prot.aatype
SCREAMING_SNAKE_CASE : List[str] = prot.atom_positions
SCREAMING_SNAKE_CASE : str = prot.residue_index.astype(np.intaa )
SCREAMING_SNAKE_CASE : Tuple = prot.b_factors
SCREAMING_SNAKE_CASE : Dict = prot.chain_index
if np.any(aatype > residue_constants.restype_num ):
raise ValueError('Invalid aatypes.' )
SCREAMING_SNAKE_CASE : str = get_pdb_headers(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
pdb_lines.extend(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = aatype.shape[0]
SCREAMING_SNAKE_CASE : str = 1
SCREAMING_SNAKE_CASE : Any = 0
SCREAMING_SNAKE_CASE : Tuple = string.ascii_uppercase
SCREAMING_SNAKE_CASE : str = None
# Add all atom sites.
for i in range(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : Tuple = res_atoa(aatype[i] )
for atom_name, pos, mask, b_factor in zip(__lowerCAmelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ):
if mask < 0.5:
continue
SCREAMING_SNAKE_CASE : Dict = 'ATOM'
SCREAMING_SNAKE_CASE : Optional[int] = atom_name if len(__lowerCAmelCase ) == 4 else F''' {atom_name}'''
SCREAMING_SNAKE_CASE : int = ''
SCREAMING_SNAKE_CASE : Tuple = ''
SCREAMING_SNAKE_CASE : int = 1.00
SCREAMING_SNAKE_CASE : List[str] = atom_name[0] # Protein supports only C, N, O, S, this works.
SCREAMING_SNAKE_CASE : str = ''
SCREAMING_SNAKE_CASE : Any = 'A'
if chain_index is not None:
SCREAMING_SNAKE_CASE : int = chain_tags[chain_index[i]]
# PDB is a columnar format, every space matters here!
SCREAMING_SNAKE_CASE : Tuple = (
F'''{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'''
F'''{res_name_a:>3} {chain_tag:>1}'''
F'''{residue_index[i]:>4}{insertion_code:>1} '''
F'''{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'''
F'''{occupancy:>6.2f}{b_factor:>6.2f} '''
F'''{element:>2}{charge:>2}'''
)
pdb_lines.append(__lowerCAmelCase )
atom_index += 1
SCREAMING_SNAKE_CASE : Tuple = i == n - 1
if chain_index is not None:
if i != n - 1 and chain_index[i + 1] != prev_chain_index:
SCREAMING_SNAKE_CASE : Dict = True
SCREAMING_SNAKE_CASE : Any = chain_index[i + 1]
if should_terminate:
# Close the chain.
SCREAMING_SNAKE_CASE : Union[str, Any] = 'TER'
SCREAMING_SNAKE_CASE : Union[str, Any] = (
F'''{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'''
)
pdb_lines.append(__lowerCAmelCase )
atom_index += 1
if i != n - 1:
# "prev" is a misnomer here. This happens at the beginning of
# each new chain.
pdb_lines.extend(get_pdb_headers(__lowerCAmelCase , __lowerCAmelCase ) )
pdb_lines.append('END' )
pdb_lines.append('' )
return "\n".join(__lowerCAmelCase )
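# For reference, the fixed-width record assembled above follows the PDB v3.3
# ATOM column layout; a representative line (spacing illustrative, not taken
# from the original source) looks like:
#
#     ATOM      1  N   MET A   1      11.104   6.134  -6.504  1.00  0.00           N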
def __a ( __lowerCAmelCase ) -> np.ndarray:
return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def __a ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ) -> Protein:
return Protein(
aatype=features['aatype'] , atom_positions=result['final_atom_positions'] , atom_mask=result['final_atom_mask'] , residue_index=features['residue_index'] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask'] ) , chain_index=__lowerCAmelCase , remark=__lowerCAmelCase , parents=__lowerCAmelCase , parents_chain_index=__lowerCAmelCase , )
| 352 |
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)
        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
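# Usage sketch for procentual_proximity, the wrapper defined below: two
# attributes, the first maximised (weight 1) and the second minimised
# (weight 0); the composite score is appended to each row in place. Values are
# chosen so the arithmetic is exact:
#
#     procentual_proximity([[0, 4], [8, 2], [16, 0]], [1, 0])
#     # -> [[0, 4, 0.0], [8, 2, 1.0], [16, 0, 2.0]]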
def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 352 | 1 |
'''simple docstring'''
def circle_sort(collection: list) -> list:
    '''Sort ``collection`` in place using recursive circular compare-and-swap passes.'''
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(circle_sort(unsorted))
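# Example: circle_sort([5, 3, 1, 4, 2]) returns [1, 2, 3, 4, 5]. Each pass
# compare-and-swaps elements paired from the two ends inward, then recurses on
# both halves; the outer loop repeats until a full pass performs no swap.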
| 377 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case : Tuple = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_a = PegasusTokenizer(lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def __lowerCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : List[str] ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Any:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_a = '''</s>'''
_a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
_a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(lowerCAmelCase_ ) , 11_03 )
def __lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def __lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
_a = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_a = self.tokenizer_class.from_pretrained(self.tmpdirname )
_a = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
_a = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids[0]
_a = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_a = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_a = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
_a = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
_a = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ ).input_ids[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_a = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
_a = '''To ensure a smooth flow of bank resolutions.'''
_a = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
_a = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ ).input_ids[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
_a = ['''This is going to be way too long.''' * 1_50, '''short example''']
_a = ['''not super long but more than 5 tokens''', '''tiny''']
_a = self._large_tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
_a = self._large_tokenizer(
text_target=lowerCAmelCase_ , max_length=5 , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase_ ) == 2 # input_ids, attention_mask.
@slow
def __lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_a = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class A ( _a ,unittest.TestCase ):
lowercase_ = PegasusTokenizer
lowercase_ = PegasusTokenizerFast
lowercase_ = True
lowercase_ = True
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_a = PegasusTokenizer(lowerCAmelCase_ , offset=0 , mask_token_sent=lowerCAmelCase_ , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def __lowerCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : Tuple ) -> PegasusTokenizer:
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : str , lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return ("This is a test", "This is a test")
def __lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
_a = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_a = self.tokenizer_class.from_pretrained(self.tmpdirname )
_a = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
_a = rust_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids[0]
_a = py_tokenizer([raw_input_str] , return_tensors=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ).input_ids[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_a = ['''This is going to be way too long.''' * 10_00, '''short example''']
_a = ['''not super long but more than 5 tokens''', '''tiny''']
_a = self._large_tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
_a = self._large_tokenizer(
text_target=lowerCAmelCase_ , max_length=5 , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase_ ) == 2 # input_ids, attention_mask.
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
_a = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
_a = self._large_tokenizer(lowerCAmelCase_ ).input_ids
self.assertListEqual(
lowerCAmelCase_ , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , )
| 377 | 1 |
__a :Optional[int] = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 1000_0000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89
def chain(number: int) -> bool:
    """Return True when the chain starting at ``number`` arrives at 1, caching results."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution(number: int = 1000_0000) -> int:
    """Count the starting numbers below ``number`` whose chain arrives at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
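# Worked chains: 44 -> 32 -> 13 -> 10 -> 1, so chain(44) is True (arrives at
# 1); 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89, so chain(85)
# is False (arrives at 89). solution() counts the False entries.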
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
| 86 |
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None):
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)
    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")
        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split
        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")
        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")
    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
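# Typical invocation via python-fire (assuming this script is saved as
# download_wmt.py; flag names mirror the keyword arguments above):
#
#     python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en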
| 21 | 0 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class lowerCAmelCase_ ( unittest.TestCase , lowerCamelCase__ ):
'''simple docstring'''
def setUp(self):
self.tool = load_tool('''text-to-speech''' )
self.tool.setup()
def test_exact_match_arg(self):
# SpeechT5 isn't deterministic
torch.manual_seed(0)
result = self.tool('''hey''')
resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
def test_exact_match_kwarg(self):
# SpeechT5 isn't deterministic
torch.manual_seed(0)
result = self.tool(text='''hey''')
resulting_tensor = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485] ) , ) )
| 531 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=30 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=10 , _UpperCAmelCase=0.02 , _UpperCAmelCase=3 , _UpperCAmelCase=None , _UpperCAmelCase=2 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = num_patches + 2
def UpperCamelCase__ ( self ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = DeiTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = DeiTForMaskedImageModeling(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = DeiTForMaskedImageModeling(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
snake_case_ = self.type_sequence_label_size
snake_case_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = DeiTForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    pixel_values,
    labels,
) = config_and_inputs
inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__snake_case = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
def UpperCamelCase__ ( self ):
snake_case_ = DeiTModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def UpperCamelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
snake_case_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
if not self.model_tester.is_training:
return
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_UpperCAmelCase )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
snake_case_ = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case_ = False
snake_case_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
snake_case_ = model_class(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
snake_case_ = model(**_UpperCAmelCase ).loss
loss.backward()
def UpperCamelCase__ ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F'''Testing {model_class} with {problem_type["title"]}''' ):
snake_case_ = problem_type['''title''']
snake_case_ = problem_type['''num_labels''']
snake_case_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
snake_case_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
if problem_type["num_labels"] > 1:
snake_case_ = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
snake_case_ = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_UpperCAmelCase ) as warning_list:
snake_case_ = model(**_UpperCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = DeiTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCAmelCase ()-> Optional[int]:
"""simple docstring"""
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
snake_case_ = DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
_UpperCAmelCase )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(**_UpperCAmelCase )
# verify the logits
snake_case_ = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
snake_case_ = torch.tensor([-1.0_266, 0.1_912, -1.2_861] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
snake_case_ = DeiTModel.from_pretrained(
'''facebook/deit-base-distilled-patch16-224''' , torch_dtype=torch.floataa , device_map='''auto''' )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='''pt''' )
snake_case_ = inputs.pixel_values.to(_UpperCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case_ = model(_UpperCAmelCase ) | 531 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> str:
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
_UpperCAmelCase = torch.permute(SCREAMING_SNAKE_CASE_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE_ ):
# linear layer
_UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
_UpperCAmelCase = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_UpperCAmelCase = flax_key_tuple[:-1] + ('''weight''',)
return flax_key_tuple, flax_tensor
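# Illustrative sketch of the renaming above (hypothetical key names, not from a real
# checkpoint): a 3-D expert kernel ("mlp", "wi", "kernel") of shape (num_experts, d_model, d_ff)
# becomes ("mlp", "wi", "weight") permuted to (num_experts, d_ff, d_model); a 2-D linear
# "kernel" is transposed to match PyTorch's (out_features, in_features) convention, and
# "scale"/"embedding" leaves are only renamed.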
def A__ ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[Any]:
"""simple docstring"""
if "metadata" in layer:
_UpperCAmelCase = layer.split('''metadata''' )
_UpperCAmelCase = ''''''.join(split_layer[0] )[:-1]
_UpperCAmelCase = [tuple(('''metadata''' + split_layer[1]).split('''/''' ) )]
elif "kvstore" in layer:
_UpperCAmelCase = layer.split('''kvstore''' )
_UpperCAmelCase = ''''''.join(split_layer[0] )[:-1]
_UpperCAmelCase = [tuple(('''kvstore''' + split_layer[1]).split('''/''' ) )]
else:
_UpperCAmelCase = layer.split('''/''' )
_UpperCAmelCase = '''/'''.join(split_layer[:-1] )
_UpperCAmelCase = (split_layer[-1],)
if "kvstore/path" in layer:
_UpperCAmelCase = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
_UpperCAmelCase = '''file'''
else:
_UpperCAmelCase = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
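# In short: layer paths containing "metadata" or "kvstore" keep that suffix as a nested key,
# any other path is split on "/", and "kvstore/path"/"kvstore/driver" values are rewritten so
# TensorStore reads shard data from the local checkpoint directory instead of the original store.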
def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Any:
"""simple docstring"""
_UpperCAmelCase = rename_keys(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = {}
for k, v in current_block.items():
_UpperCAmelCase = v
_UpperCAmelCase = new_current_block
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def A__ ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str = WEIGHTS_NAME ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = convert_file_size_to_int(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = []
_UpperCAmelCase = {}
_UpperCAmelCase = 0
_UpperCAmelCase = 0
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''' ) as fp:
_UpperCAmelCase = serialization.msgpack_restore(fp.read() )['''optimizer''']['''target''']
_UpperCAmelCase = flatten_dict(SCREAMING_SNAKE_CASE_ , sep='''/''' )
_UpperCAmelCase = {}
for layer in checkpoint_info.keys():
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_key_and_tensorstore_dict(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if curr_real_layer_name in all_layers:
_UpperCAmelCase = content
else:
_UpperCAmelCase = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
_UpperCAmelCase = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
_UpperCAmelCase = torch.tensor(SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
_UpperCAmelCase , _UpperCAmelCase = rename_base_flax_keys(tuple(key.split('''/''' ) ) , SCREAMING_SNAKE_CASE_ )
_UpperCAmelCase = '''/'''.join(SCREAMING_SNAKE_CASE_ )
        # If this weight would tip the current shard over the maximal size, we split here.
if current_block_size + weight_size > max_shard_size:
_UpperCAmelCase = os.path.join(
SCREAMING_SNAKE_CASE_ , weights_name.replace('''.bin''' , F'''-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
_UpperCAmelCase = {}
_UpperCAmelCase = 0
_UpperCAmelCase = raw_weights.to(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
_UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , weights_name.replace('''.bin''' , F'''-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin''' ) )
rename_and_save_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(SCREAMING_SNAKE_CASE_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
_UpperCAmelCase = {}
_UpperCAmelCase = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE_ ):
        _UpperCAmelCase = weights_name.replace(
            '''.bin''' , F'''-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE_ ):05d}.bin''' )
_UpperCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''' ) )
os.rename(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
_UpperCAmelCase = shard
for key in shard:
_UpperCAmelCase = shard_file
# Add the metadata
_UpperCAmelCase = {'''total_size''': total_size}
_UpperCAmelCase = {'''metadata''': metadata, '''weight_map''': weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , '''w''' , encoding='''utf-8''' ) as f:
_UpperCAmelCase = json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ ) + '''\n'''
f.write(SCREAMING_SNAKE_CASE_ )
return metadata, index
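# The returned index follows the standard sharded-checkpoint layout (illustrative values only):
# {"metadata": {"total_size": 26843545600},
#  "weight_map": {"encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00072.bin", ...}}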
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase_ = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def A__ ( ) -> Dict:
"""simple docstring"""
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
_UpperCAmelCase = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''' )
config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''' )
_UpperCAmelCase = SwitchTransformersForConditionalGeneration.from_pretrained(
'''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''' )
_UpperCAmelCase = TaTokenizer.from_pretrained('''t5-small''' )
_UpperCAmelCase = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
_UpperCAmelCase = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).input_ids
_UpperCAmelCase = model.generate(SCREAMING_SNAKE_CASE_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) ) | 32 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (__A , __A , unittest.TestCase ):
"""simple docstring"""
_a : str = CycleDiffusionPipeline
_a : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''negative_prompt''',
'''height''',
'''width''',
'''negative_prompt_embeds''',
}
_a : str = PipelineTesterMixin.required_optional_params - {'''latents'''}
_a : str = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''source_prompt'''} )
_a : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_a : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _a ( self ):
"""simple docstring"""
torch.manual_seed(0 )
a_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
a_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , num_train_timesteps=1_000 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
a_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
a_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
a_ = CLIPTextModel(UpperCamelCase__ )
a_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
a_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _a ( self , UpperCamelCase__ , UpperCamelCase__=0 ):
"""simple docstring"""
a_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
a_ = image / 2 + 0.5
if str(UpperCamelCase__ ).startswith('mps' ):
a_ = torch.manual_seed(UpperCamelCase__ )
else:
a_ = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
a_ = {
'prompt': 'An astronaut riding an elephant',
'source_prompt': 'An astronaut riding a horse',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'eta': 0.1,
'strength': 0.8,
'guidance_scale': 3,
'source_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def _a ( self ):
"""simple docstring"""
a_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
a_ = self.get_dummy_components()
a_ = CycleDiffusionPipeline(**UpperCamelCase__ )
a_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
a_ = self.get_dummy_inputs(UpperCamelCase__ )
a_ = pipe(**UpperCamelCase__ )
a_ = output.images
a_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
a_ = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def _a ( self ):
"""simple docstring"""
a_ = self.get_dummy_components()
for name, module in components.items():
if hasattr(UpperCamelCase__ , 'half' ):
a_ = module.half()
a_ = CycleDiffusionPipeline(**UpperCamelCase__ )
a_ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
a_ = self.get_dummy_inputs(UpperCamelCase__ )
a_ = pipe(**UpperCamelCase__ )
a_ = output.images
a_ = images[0, -3:, -3:, -1]
assert images.shape == (1, 32, 32, 3)
a_ = np.array([0.3_506, 0.4_543, 0.446, 0.4_575, 0.5_195, 0.4_155, 0.5_273, 0.518, 0.4_116] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def _a ( self ):
"""simple docstring"""
return super().test_save_load_local()
@unittest.skip('non-deterministic pipeline' )
def _a ( self ):
"""simple docstring"""
return super().test_inference_batch_single_identical()
@skip_mps
def _a ( self ):
"""simple docstring"""
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def _a ( self ):
"""simple docstring"""
return super().test_save_load_optional_components()
@skip_mps
def _a ( self ):
"""simple docstring"""
return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def _a ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ):
"""simple docstring"""
a_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
a_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy' )
a_ = init_image.resize((512, 512) )
a_ = 'CompVis/stable-diffusion-v1-4'
a_ = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='scheduler' )
a_ = CycleDiffusionPipeline.from_pretrained(
UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , torch_dtype=torch.floataa , revision='fp16' )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
a_ = 'A black colored car'
a_ = 'A blue colored car'
a_ = torch.manual_seed(0 )
a_ = pipe(
prompt=UpperCamelCase__ , source_prompt=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCamelCase__ , output_type='np' , )
a_ = output.images
# the values aren't exactly equal, but the images look the same visually
assert np.abs(image - expected_image ).max() < 5e-1
def _a ( self ):
"""simple docstring"""
a_ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/cycle-diffusion/black_colored_car.png' )
a_ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy' )
a_ = init_image.resize((512, 512) )
a_ = 'CompVis/stable-diffusion-v1-4'
a_ = DDIMScheduler.from_pretrained(UpperCamelCase__ , subfolder='scheduler' )
a_ = CycleDiffusionPipeline.from_pretrained(UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ )
pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
pipe.enable_attention_slicing()
a_ = 'A black colored car'
a_ = 'A blue colored car'
a_ = torch.manual_seed(0 )
a_ = pipe(
prompt=UpperCamelCase__ , source_prompt=UpperCamelCase__ , image=UpperCamelCase__ , num_inference_steps=100 , eta=0.1 , strength=0.85 , guidance_scale=3 , source_guidance_scale=1 , generator=UpperCamelCase__ , output_type='np' , )
a_ = output.images
assert np.abs(image - expected_image ).max() < 2e-2
| 536 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
UpperCamelCase = {
"""configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""],
}
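# Standard transformers lazy-import layout: names are declared up front and only
# materialised on first attribute access via the _LazyModule constructed at the bottom.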
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ErnieForCausalLM""",
"""ErnieForMaskedLM""",
"""ErnieForMultipleChoice""",
"""ErnieForNextSentencePrediction""",
"""ErnieForPreTraining""",
"""ErnieForQuestionAnswering""",
"""ErnieForSequenceClassification""",
"""ErnieForTokenClassification""",
"""ErnieModel""",
"""ErniePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 152 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return getitem, k
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
return setitem, k, v
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return delitem, k
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ):
try:
return fun(SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE ), None
except Exception as e:
return None, e
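# Each operation is a (function, *args) tuple built by _get/_set/_del above; _run_operation
# applies it to a mapping and returns (result, exception) so HashMap and dict behaviour can
# be compared even when the operation raises.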
UpperCamelCase = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
UpperCamelCase = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
UpperCamelCase = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
UpperCamelCase = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
UpperCamelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
UpperCamelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
'''operations''' , (
pytest.param(_add_items , id='''add items''' ),
pytest.param(_overwrite_items , id='''overwrite items''' ),
pytest.param(_delete_items , id='''delete items''' ),
pytest.param(_access_absent_items , id='''access absent items''' ),
pytest.param(_add_with_resize_up , id='''add with resize up''' ),
pytest.param(_add_with_resize_down , id='''add with resize down''' ),
) , )
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : Optional[Any] = HashMap(initial_block_size=4 )
A_ : Union[str, Any] = {}
for _, (fun, *args) in enumerate(SCREAMING_SNAKE_CASE ):
A_ , A_ : Union[str, Any] = _run_operation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
A_ , A_ : int = _run_operation(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , *SCREAMING_SNAKE_CASE )
assert my_res == py_res
assert str(SCREAMING_SNAKE_CASE ) == str(SCREAMING_SNAKE_CASE )
assert set(SCREAMING_SNAKE_CASE ) == set(SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
assert set(my.items() ) == set(py.items() )
def _SCREAMING_SNAKE_CASE ( ):
def is_public(SCREAMING_SNAKE_CASE ) -> bool:
return not name.startswith('''_''' )
A_ : Tuple = {name for name in dir({} ) if is_public(SCREAMING_SNAKE_CASE )}
A_ : Optional[Any] = {name for name in dir(HashMap() ) if is_public(SCREAMING_SNAKE_CASE )}
assert dict_public_names > hash_public_names
| 152 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase : Optional[Any] = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__lowercase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 476 |
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : Tuple = 16
__lowercase : int = 32
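# Convert a byte count into whole mebibytes (2**20 bytes); used to report GPU memory below.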
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
return int(x / 2**20 )
class __UpperCamelCase :
def __enter__( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
__a : Union[str, Any] = torch.cuda.memory_allocated()
return self
def __exit__( self , *__a ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
__a : List[str] = torch.cuda.memory_allocated()
__a : Union[str, Any] = torch.cuda.max_memory_allocated()
__a : int = bamb(self.end - self.begin )
__a : Tuple = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : str = "bert-base-cased" , _SCREAMING_SNAKE_CASE : int = 320 , _SCREAMING_SNAKE_CASE : int = 160 , ):
__a : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
__a : int = load_dataset(
'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} )
def tokenize_function(_SCREAMING_SNAKE_CASE : List[str] ):
# max_length=None => use the model max length (it's actually the default)
__a : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__a : Dict = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_SCREAMING_SNAKE_CASE )
    # We also rename the 'label' column to 'labels', which is the name expected by the models in the
    # transformers library
__a : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_SCREAMING_SNAKE_CASE : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__a : Any = DataLoader(
tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
__a : List[str] = DataLoader(
tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ):
# Initialize accelerator
__a : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a : Tuple = config['lr']
__a : List[Any] = int(config['num_epochs'] )
__a : List[Any] = int(config['seed'] )
__a : List[str] = int(config['batch_size'] )
__a : Optional[Any] = args.model_name_or_path
set_seed(_SCREAMING_SNAKE_CASE )
__a , __a : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a : Tuple = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
# Instantiate optimizer
__a : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__a : str = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
__a : int = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__a : Optional[Any] = 1
__a : List[Any] = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__a : int = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , )
else:
__a : Dict = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a : str = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
__a : List[str] = 0
    # We also need to keep track of the starting epoch so files are named properly
__a : Dict = 0
# Now we train the model
__a : Optional[Any] = {}
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__a : int = model(**_SCREAMING_SNAKE_CASE )
__a : str = outputs.loss
__a : Dict = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
__a : int = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
__a : List[str] = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_SCREAMING_SNAKE_CASE , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_SCREAMING_SNAKE_CASE , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of train epochs.' , )
__a : List[Any] = parser.parse_args()
__a : str = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 476 | 1 |
"""simple docstring"""
def lowerCamelCase ( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
__UpperCAmelCase : Tuple = (1 + 2_4 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
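# Inverse of the pentagonal formula P(n) = n * (3n - 1) / 2: solving 3n^2 - n - 2x = 0
# gives n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal exactly when that n is a whole
# positive number, which is what the fractional-part check above tests.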
def lowerCamelCase ( _UpperCamelCase : int = 5_0_0_0 ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = [(i * (3 * i - 1)) // 2 for i in range(1 , _UpperCamelCase )]
for i, pentagonal_i in enumerate(_UpperCamelCase ):
for j in range(_UpperCamelCase , len(_UpperCamelCase ) ):
__UpperCAmelCase : Optional[int] = pentagonal_nums[j]
__UpperCAmelCase : int = pentagonal_i + pentagonal_j
__UpperCAmelCase : str = pentagonal_j - pentagonal_i
if is_pentagonal(_UpperCamelCase ) and is_pentagonal(_UpperCamelCase ):
return b
return -1
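# Project Euler 44: search for pentagonal numbers whose sum and difference are both
# pentagonal and return that difference; this double loop returns the first qualifying
# difference it encounters.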
if __name__ == "__main__":
print(F"{solution() = }")
| 710 |
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
UpperCAmelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def lowerCamelCase ( _UpperCamelCase : str ) -> Optional[Any]:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__UpperCAmelCase : List[str] = model_type_to_module_name(_UpperCamelCase )
__UpperCAmelCase : List[str] = importlib.import_module(f'''.{module_name}''' , """transformers.models""" )
try:
return getattr(_UpperCamelCase , _UpperCamelCase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_UpperCamelCase , """__name__""" , _UpperCamelCase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
__UpperCAmelCase : List[Any] = importlib.import_module("""transformers""" )
if hasattr(_UpperCamelCase , _UpperCamelCase ):
return getattr(_UpperCamelCase , _UpperCamelCase )
return None
def lowerCamelCase ( _UpperCamelCase : Union[str, os.PathLike] , _UpperCamelCase : Optional[Union[str, os.PathLike]] = None , _UpperCamelCase : bool = False , _UpperCamelCase : bool = False , _UpperCamelCase : Optional[Dict[str, str]] = None , _UpperCamelCase : Optional[Union[bool, str]] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : bool = False , **_UpperCamelCase : List[str] , ) -> int:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = get_file_from_repo(
_UpperCamelCase , _UpperCamelCase , cache_dir=_UpperCamelCase , force_download=_UpperCamelCase , resume_download=_UpperCamelCase , proxies=_UpperCamelCase , use_auth_token=_UpperCamelCase , revision=_UpperCamelCase , local_files_only=_UpperCamelCase , )
if resolved_config_file is None:
logger.info(
"""Could not locate the image processor configuration file, will try to use the model config instead.""" )
return {}
with open(_UpperCamelCase , encoding="""utf-8""" ) as reader:
return json.load(_UpperCamelCase )
class lowerCamelCase__ :
"""simple docstring"""
def __init__( self : Tuple ):
'''simple docstring'''
raise EnvironmentError(
"""AutoImageProcessor is designed to be instantiated """
"""using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.""" )
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase )
def lowerCamelCase__ ( cls : List[str] , UpperCamelCase : List[Any] , **UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : List[str] = kwargs.pop("""config""" , UpperCamelCase )
__UpperCAmelCase : int = kwargs.pop("""trust_remote_code""" , UpperCamelCase )
__UpperCAmelCase : str = True
__UpperCAmelCase ,__UpperCAmelCase : str = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Tuple = config_dict.get("""image_processor_type""" , UpperCamelCase )
__UpperCAmelCase : str = None
if "AutoImageProcessor" in config_dict.get("""auto_map""" , {} ):
__UpperCAmelCase : Optional[int] = config_dict["""auto_map"""]["""AutoImageProcessor"""]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__UpperCAmelCase : str = config_dict.pop("""feature_extractor_type""" , UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
"""Could not find image processor class in the image processor config or the model config. Loading"""
""" based on pattern matching with the model's feature extractor configuration.""" )
__UpperCAmelCase : Optional[int] = feature_extractor_class.replace("""FeatureExtractor""" , """ImageProcessor""" )
if "AutoFeatureExtractor" in config_dict.get("""auto_map""" , {} ):
__UpperCAmelCase : Optional[Any] = config_dict["""auto_map"""]["""AutoFeatureExtractor"""]
__UpperCAmelCase : str = feature_extractor_auto_map.replace("""FeatureExtractor""" , """ImageProcessor""" )
logger.warning(
"""Could not find image processor auto map in the image processor config or the model config."""
""" Loading based on pattern matching with the model's feature extractor configuration.""" )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCamelCase , UpperCamelCase ):
__UpperCAmelCase : int = AutoConfig.from_pretrained(UpperCamelCase , **UpperCamelCase )
            # It could be in `config.image_processor_type`
__UpperCAmelCase : Optional[int] = getattr(UpperCamelCase , """image_processor_type""" , UpperCamelCase )
if hasattr(UpperCamelCase , """auto_map""" ) and "AutoImageProcessor" in config.auto_map:
__UpperCAmelCase : Dict = config.auto_map["""AutoImageProcessor"""]
if image_processor_class is not None:
__UpperCAmelCase : Optional[int] = image_processor_class_from_name(UpperCamelCase )
__UpperCAmelCase : Optional[Any] = image_processor_auto_map is not None
__UpperCAmelCase : Tuple = image_processor_class is not None or type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
__UpperCAmelCase : Any = resolve_trust_remote_code(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if has_remote_code and trust_remote_code:
__UpperCAmelCase : str = get_class_from_dynamic_module(
UpperCamelCase , UpperCamelCase , **UpperCamelCase )
__UpperCAmelCase : Optional[Any] = kwargs.pop("""code_revision""" , UpperCamelCase )
if os.path.isdir(UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
__UpperCAmelCase : int = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase )]
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCamelCase__ ( UpperCamelCase : Optional[int] , UpperCamelCase : Dict ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase , UpperCamelCase )
| 299 | 0 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
SCREAMING_SNAKE_CASE : List[Any] = TypeVar("""T""")
class A_ ( Generic[T] ):
def __init__( self : str , __SCREAMING_SNAKE_CASE : list[T] , __SCREAMING_SNAKE_CASE : Callable[[T, T], T] ):
__a = None
__a = len(__SCREAMING_SNAKE_CASE )
__a = [any_type for _ in range(self.N )] + arr
__a = fnc
self.build()
def _UpperCAmelCase ( self : Dict ):
for p in range(self.N - 1 , 0 , -1 ):
__a = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def _UpperCAmelCase ( self : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : T ):
p += self.N
__a = v
while p > 1:
__a = p // 2
__a = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def _UpperCAmelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ): # noqa: E741
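        # Bottom-up range query: l and r are shifted into the leaf layer, then walked toward
        # the root; an odd left index or an even right index marks a node fully inside [l, r],
        # so it is folded into the running result before the pointers move up one level.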
__a , __a = l + self.N, r + self.N
__a = None
while l <= r:
if l % 2 == 1:
__a = self.st[l] if res is None else self.fn(__SCREAMING_SNAKE_CASE , self.st[l] )
if r % 2 == 0:
__a = self.st[r] if res is None else self.fn(__SCREAMING_SNAKE_CASE , self.st[r] )
__a , __a = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
SCREAMING_SNAKE_CASE : Dict = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
SCREAMING_SNAKE_CASE : List[str] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
SCREAMING_SNAKE_CASE : Dict = SegmentTree(test_array, min)
SCREAMING_SNAKE_CASE : int = SegmentTree(test_array, max)
SCREAMING_SNAKE_CASE : str = SegmentTree(test_array, lambda a, b: a + b)
def __A ( ):
"""simple docstring"""
for i in range(len(_A ) ):
for j in range(_A , len(_A ) ):
__a = reduce(_A , test_array[i : j + 1] )
__a = reduce(_A , test_array[i : j + 1] )
__a = reduce(lambda _A , _A : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(_A , _A )
assert max_range == max_segment_tree.query(_A , _A )
assert sum_range == sum_segment_tree.query(_A , _A )
test_all_segments()
for index, value in test_updates.items():
SCREAMING_SNAKE_CASE : str = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 197 | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( a_ ):
_SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE = """ChineseCLIPImageProcessor"""
_SCREAMING_SNAKE_CASE = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : List[Any] ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __SCREAMING_SNAKE_CASE , )
__a = kwargs.pop("feature_extractor" )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
__a = self.image_processor
def __call__( self : Optional[int] , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : List[Any] ):
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
__a = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if images is not None:
__a = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
__a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__SCREAMING_SNAKE_CASE ) , tensor_type=__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : int , *__SCREAMING_SNAKE_CASE : Optional[Any] , **__SCREAMING_SNAKE_CASE : str ):
return self.tokenizer.batch_decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[Any] , *__SCREAMING_SNAKE_CASE : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[int] ):
return self.tokenizer.decode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
@property
def _UpperCAmelCase ( self : Union[str, Any] ):
__a = self.tokenizer.model_input_names
__a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCAmelCase ( self : Any ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __SCREAMING_SNAKE_CASE , )
return self.image_processor_class
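# Minimal usage sketch (hypothetical checkpoint name; substitute one you actually use):
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# Text fields come from the tokenizer, "pixel_values" from the image processor.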
| 197 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def _lowerCamelCase ( _a ):
"""simple docstring"""
if num <= 0:
_lowerCamelCase = F'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(__SCREAMING_SNAKE_CASE )
_lowerCamelCase = [True] * (num + 1)
_lowerCamelCase = []
_lowerCamelCase = 2
_lowerCamelCase = int(math.sqrt(__SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(__SCREAMING_SNAKE_CASE )
# Set multiples of start be False
for i in range(start * start , num + 1 , __SCREAMING_SNAKE_CASE ):
if sieve[i] is True:
_lowerCamelCase = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(__SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 701 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False, False, False
@dataclass
class __magic_name__ :
"""simple docstring"""
_UpperCamelCase = None
_UpperCamelCase = True
_UpperCamelCase = True
_UpperCamelCase = None
# Automatically constructed
_UpperCamelCase = "dict"
_UpperCamelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
_UpperCamelCase = field(default="Audio" ,init=lowercase_ ,repr=lowercase_ )
def __call__( self ):
return self.pa_type
def _UpperCAmelCase ( self , a__ ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
if isinstance(a__ , a__ ):
return {"bytes": None, "path": value}
elif isinstance(a__ , a__ ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
_lowerCamelCase = BytesIO()
sf.write(a__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('''pcm''' ):
# "PCM" only has raw audio bytes
if value.get('''sampling_rate''' ) is None:
                    # At least, if you want to convert "PCM-byte" to "WAV-byte", you have to know the sampling rate
raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
if value.get('''bytes''' ):
                    # If we already have the PCM bytes, we don't have to do "read file, make bytes" (just use them!)
_lowerCamelCase = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
_lowerCamelCase = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67
_lowerCamelCase = BytesIO(bytes() )
sf.write(a__ , a__ , value['''sampling_rate'''] , format='''wav''' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('''path''' )}
elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
else:
raise ValueError(
f'''An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def _UpperCAmelCase ( self , a__ , a__ = None ):
if not self.decode:
raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
_lowerCamelCase , _lowerCamelCase = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
if path is None and file is None:
raise ValueError(f'''An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
_lowerCamelCase = xsplitext(a__ )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
                '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31. '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
                '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0. '''
'''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
if file is None:
_lowerCamelCase = token_per_repo_id or {}
_lowerCamelCase = path.split('''::''' )[-1]
try:
_lowerCamelCase = string_to_dict(a__ , config.HUB_DATASETS_URL )['''repo_id''']
_lowerCamelCase = token_per_repo_id[repo_id]
except (ValueError, KeyError):
_lowerCamelCase = None
with xopen(a__ , '''rb''' , use_auth_token=a__ ) as f:
_lowerCamelCase , _lowerCamelCase = sf.read(a__ )
else:
_lowerCamelCase , _lowerCamelCase = sf.read(a__ )
_lowerCamelCase = array.T
if self.mono:
_lowerCamelCase = librosa.to_mono(a__ )
if self.sampling_rate and self.sampling_rate != sampling_rate:
_lowerCamelCase = librosa.resample(a__ , orig_sr=a__ , target_sr=self.sampling_rate )
_lowerCamelCase = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def _UpperCAmelCase ( self ):
from .features import Value
if self.decode:
raise ValueError('''Cannot flatten a decoded Audio feature.''' )
return {
"bytes": Value('''binary''' ),
"path": Value('''string''' ),
}
def _UpperCAmelCase ( self , a__ ):
if pa.types.is_string(storage.type ):
_lowerCamelCase = pa.array([None] * len(a__ ) , type=pa.binary() )
_lowerCamelCase = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_lowerCamelCase = pa.array([None] * len(a__ ) , type=pa.string() )
_lowerCamelCase = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
_lowerCamelCase = pa.array([Audio().encode_example(a__ ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('''bytes''' ) >= 0:
_lowerCamelCase = storage.field('''bytes''' )
else:
_lowerCamelCase = pa.array([None] * len(a__ ) , type=pa.binary() )
if storage.type.get_field_index('''path''' ) >= 0:
_lowerCamelCase = storage.field('''path''' )
else:
_lowerCamelCase = pa.array([None] * len(a__ ) , type=pa.string() )
_lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
return array_cast(a__ , self.pa_type )
def _UpperCAmelCase ( self , a__ ):
@no_op_if_value_is_null
def path_to_bytes(a__ ):
with xopen(a__ , '''rb''' ) as f:
_lowerCamelCase = f.read()
return bytes_
_lowerCamelCase = pa.array(
[
(path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
_lowerCamelCase = pa.array(
[os.path.basename(a__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
_lowerCamelCase = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
return array_cast(a__ , self.pa_type )
| 297 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__A = ["""gpt2"""]
__A = """gpt2"""
if is_tf_available():
class _lowerCAmelCase ( tf.Module ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :List[str] = tokenizer
lowerCAmelCase__ :Optional[int] = AutoConfig.from_pretrained(__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = TFGPTaLMHeadModel.from_config(__UpperCAmelCase )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def snake_case ( self , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = self.tokenizer(__UpperCAmelCase )
lowerCAmelCase__ :int = tokenized['input_ids'].to_tensor()
lowerCAmelCase__ :Optional[Any] = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
lowerCAmelCase__ :int = self.model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase )['logits']
return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    # The TF tokenizer is usually exercised as a pretrained tokenizer loaded from an
    # existing model checkpoint, so that is what the tests below focus on.

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # a pad token id is required for the test to run; the value is arbitrary
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
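# A minimal eager usage sketch of the in-graph tokenizer exercised above; the
# "gpt2" checkpoint and an installed keras-nlp are assumptions of this demo.
def _demo_tf_tokenizer():
    tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
    outputs = tf_tokenizer(tf.constant(["Hello there!"]))  # dict of tf tensors
    return outputs["input_ids"]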
| 93 |
'''simple docstring'''
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
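# Usage sketch: fire maps positional CLI arguments onto minify's parameters, so
# (assuming this file is saved as minify.py) the call below mirrors
# `python minify.py ./chunks ./chunks_mini 5`; the paths are illustrative.
#
#     minify("./chunks", "./chunks_mini", 5)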
| 195 | 0 |
'''simple docstring'''
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('1.0.0a'):
raise Exception('requires fairseq >= 1.0.0a')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak the fairseq RoBERTa weights into our XLM-RoBERTa-XL structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--roberta_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--classification_head', action='store_true', help='Whether to convert a final classification head.'
)
    args = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
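
    # A hedged sketch of consuming the freshly converted dump; it reuses the CLI
    # argument above rather than any hard-coded path.
    #
    #     reloaded = XLMRobertaXLForMaskedLM.from_pretrained(args.pytorch_dump_folder_path)
    #     reloaded.eval()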
| 174 |
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"""{solution() = }""")
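# Sanity check of the Euclid parametrization the loop above relies on: for coprime
# m > n of opposite parity, (m^2 - n^2, 2mn, m^2 + n^2) is a primitive Pythagorean
# triple whose perimeter is 2m(m + n). The default values below are chosen by hand.
def _check_euclid_formula(euclid_m=2, euclid_n=1):
    a = euclid_m**2 - euclid_n**2
    b = 2 * euclid_m * euclid_n
    c = euclid_m**2 + euclid_n**2
    assert a**2 + b**2 == c**2  # it really is a right triangle
    assert a + b + c == 2 * euclid_m * (euclid_m + euclid_n)  # the stride used above
    return (a, b, c)  # (3, 4, 5) for the defaults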
| 174 | 1 |
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Apply the Exponential Linear Unit (ELU) activation element-wise."""
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
if __name__ == "__main__":
import doctest
doctest.testmod()
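
    # Usage sketch (expected values computed by hand): negatives decay toward
    # -alpha, non-negative inputs pass through unchanged.
    print(exponential_linear_unit(np.array([-2.0, 0.0, 3.0]), alpha=1.0))
    # -> [-0.86466472  0.          3.        ]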
| 180 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length,
        vocab_size,
        d_model,
        dropout_rate,
        num_layers,
        num_heads,
        d_kv,
        d_ff,
        feed_forward_proj,
        is_decoder=False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
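# A minimal smoke test for the encoder above; every hyperparameter below is a toy
# value of my choosing, not taken from any released spectrogram-diffusion config.
def _demo_notes_encoder():
    encoder = SpectrogramNotesEncoder(
        max_length=16,
        vocab_size=100,
        d_model=32,
        dropout_rate=0.1,
        num_layers=2,
        num_heads=4,
        d_kv=8,
        d_ff=64,
        feed_forward_proj="gated-gelu",
    )
    tokens = torch.randint(0, 100, (1, 16))
    mask = torch.ones(1, 16, dtype=torch.long)
    hidden_states, out_mask = encoder(tokens, mask)
    return hidden_states.shape  # torch.Size([1, 16, 32])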
| 180 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
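# Typical calls into this module; the pinned versions below are illustrative.
# `require_version` accepts bare names, single pins, and comma-separated ranges,
# and treats "python" as a pseudo-package backed by sys.version_info.
#
#     require_version("numpy")                                   # installed at any version
#     require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")       # multi-constraint range
#     require_version("python>=3.8", "needed for this feature")  # interpreter check
#     require_version_core("numpy>=1.17")                        # adds the -U upgrade hint
| 720 |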
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase :str = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase :Optional[int] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
        NllbMoeTop2Router,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 278 | 0 |
'''simple docstring'''
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
"""simple docstring"""
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a
_a : int = {'id': example['id']}
_a : List[Any] = example['annotations']
_a : Optional[Any] = annotation['yes_no_answer']
if 0 in yes_no_answer or 1 in yes_no_answer:
_a : Any = ['yes'] if 1 in yes_no_answer else ['no']
_a : str = []
_a : List[Any] = []
_a : str = ['<cls>']
else:
_a : List[str] = ['short']
_a : int = choose_first(annotation['short_answers'] )
if len(out['start_token'] ) == 0:
# answer will be long if short is not available
_a : List[str] = ['long']
_a : Any = choose_first(annotation['long_answer'] , is_long_answer=_snake_case )
_a : Optional[int] = []
answer.update(_snake_case )
# disregard some samples
if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
_a : Optional[int] = True
else:
_a : List[str] = False
_a : Dict = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
if not all(isinstance(answer[k] , _snake_case ) for k in cols ):
raise ValueError('Issue in ID' , example['id'] )
return answer
def get_context_and_ans(example, assertion=False):
"""simple docstring"""
_a : Any = _get_single_answer(_snake_case )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
_a : List[Any] = example['document']['tokens']
_a : str = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(_snake_case ),
"answer": {
"start_token": -1_0_0, # ignore index in cross-entropy
"end_token": -1_0_0, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
_a : Optional[int] = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
_a : Any = example['document']['tokens']
_a : List[Any] = answer['start_token']
_a : List[str] = answer['end_token']
_a : Dict = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
_a : List[str] = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
_a : int = doc['is_html'][answer['start_token'] : answer['end_token']]
_a : Optional[Any] = doc['token'][answer['start_token'] : answer['end_token']]
_a : Union[str, Any] = ' '.join([old[i] for i in range(len(_snake_case ) ) if not is_html[i]] )
if new != old:
print('ID:' , example['id'] )
print('New:' , _snake_case , end='\n' )
print('Old:' , _snake_case , end='\n\n' )
return {
"context": " ".join(_snake_case ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
"""simple docstring"""
_a : int = get_context_and_ans(_snake_case , assertion=_snake_case )
_a : str = out['answer']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
_a : Optional[Any] = tokenizer(example['question']['text'] , out['context'] ).input_ids
_a : List[Any] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
_a : Tuple = []
_a : Any = []
_a : int = input_ids[:q_len]
_a : Tuple = range(_snake_case , len(_snake_case ) , max_length - doc_stride )
for i in doc_start_indices:
_a : str = i + max_length - q_len
_a : Optional[Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['category'][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-1_0_0] * len(_snake_case ),
"end_token": [-1_0_0] * len(_snake_case ),
"category": category,
},
}
_a : Optional[Any] = out['context'].split()
_a : List[Any] = splitted_context[answer['end_token']]
_a : Dict = len(
tokenizer(
' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=_snake_case , ).input_ids )
_a : str = len(
tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=_snake_case ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
_a : Union[str, Any] = len(tokenizer(_snake_case , add_special_tokens=_snake_case ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
_a : Union[str, Any] = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive
_a : Tuple = answer['start_token']
_a : List[Any] = answer['end_token']
if assertion:
_a : Optional[int] = tokenizer.decode(_snake_case )
if answer["span"] != new:
print('ISSUE IN TOKENIZATION' )
print('OLD:' , answer['span'] )
print('NEW:' , _snake_case , end='\n\n' )
if len(_snake_case ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
_a : List[str] = input_ids[:q_len]
_a : Any = range(_snake_case , len(_snake_case ) , max_length - doc_stride )
_a : Optional[Any] = []
_a : Any = []
_a : Union[str, Any] = []
_a : Optional[int] = [] # null, yes, no, long, short
for i in doc_start_indices:
_a : Union[str, Any] = i + max_length - q_len
_a : Dict = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
_a : Union[str, Any] = start_token - i + q_len
_a : Optional[int] = end_token - i + q_len
answers_category.append(answer['category'][0] ) # ["short"] -> "short"
else:
_a : Union[str, Any] = -1_0_0
_a : int = -1_0_0
answers_category.append('null' )
_a : Any = inputs[-1][start_token : end_token + 1]
answers_start_token.append(_snake_case )
answers_end_token.append(_snake_case )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('ISSUE in strided for ID:' , example['id'] )
print('New:' , tokenizer.decode(_snake_case ) )
print('Old:' , tokenizer.decode(_snake_case ) , end='\n\n' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
for ids, start, end, cat in zip(
example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the "null" samples
writer.write(
{
'input_ids': ids,
'start_token': start,
'end_token': end,
'category': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
    data = load_dataset("natural_questions")
    tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    data = data["train" if PROCESS_TRAIN == "true" else "validation"]

    fn_kwargs = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    data = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    data = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)

    np.random.seed(SEED)
    cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
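
    # Window arithmetic sketch with toy numbers standing in for MAX_LENGTH /
    # DOC_STRIDE / the question length; not part of the original script.
    _max_len, _doc_stride, _q_len, _n_tokens = 8, 4, 3, 20
    _starts = range(_q_len, _n_tokens, _max_len - _doc_stride)
    print([(i, i + _max_len - _q_len) for i in _starts])
    # -> [(3, 8), (7, 12), (11, 16), (15, 20), (19, 24)]: each window holds
    # _max_len - _q_len context tokens and consecutive windows overlap by
    # _doc_stride - _q_len tokens, so answers near a window edge are not lost.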
| 229 |
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data):
        self.data = data
        self.right = None
        self.left = None


def build_tree() -> TreeNode:
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise


def pre_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)


def in_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)


def post_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")


def level_order(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)


def level_order_actual(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)


def pre_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right


def in_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right


def post_order_iter(node: TreeNode) -> None:
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")


def prompt(s: str = "", width=50, char="*") -> str:
    if not s:
        return "\n" + width * char
    left, extra = divmod(width - len(s) - 2, 2)
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
UpperCamelCase__ = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
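
    # Non-interactive sketch: build a three-node tree by hand (bypassing the
    # input() prompts in build_tree) and run two of the traversals above.
    demo_root = TreeNode(1)
    demo_root.left, demo_root.right = TreeNode(2), TreeNode(3)
    pre_order(demo_root)  # prints: 1,2,3,
    print()
    in_order(demo_root)  # prints: 2,1,3,
    print()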
| 110 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Tuple = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
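# A hedged instantiation sketch: the first config uses only library defaults,
# while the overrides in the second are illustrative toy values of mine.
def _demo_swin2sr_config():
    config = Swin2SRConfig()  # defaults: 2x upscale, six stages of depth 6
    tiny = Swin2SRConfig(embed_dim=60, depths=[2, 2], num_heads=[3, 3], upscale=4)
    return config.upscale, tiny.upscale  # (2, 4)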
| 713 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build the QFT circuit on `number_of_qubits` qubits and simulate it with 10000 shots."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
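
    # QFT maps |000> to an equal superposition, so the 10000 shots should land
    # roughly uniformly (about 1250 per basis state); a quick live check:
    counts = quantum_fourier_transform(3)
    assert sum(counts.values()) == 10_000
    print(sorted(counts.items()))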
| 166 | 0 |
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.1_5},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
print(f"Writing results to {pytorch_dump_folder_path}" )
# handle various types of models
lowerCAmelCase = basename(A__ )
lowerCAmelCase = dirname(A__ )
lowerCAmelCase = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowerCAmelCase = cls.hub_models()
lowerCAmelCase = {"bpe": "fastbpe", "tokenizer": "moses"}
lowerCAmelCase = "."
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(f"using checkpoint {checkpoint_file}" )
lowerCAmelCase = hub_utils.from_pretrained(
A__ , A__ , A__ , archive_map=A__ , **A__ )
lowerCAmelCase = vars(chkpt["args"]["model"] )
lowerCAmelCase = args["source_lang"]
lowerCAmelCase = args["target_lang"]
lowerCAmelCase = dirname(A__ )
lowerCAmelCase = basename(A__ )
# dicts
lowerCAmelCase = os.path.join(A__ , f"dict.{src_lang}.txt" )
lowerCAmelCase = os.path.join(A__ , f"dict.{tgt_lang}.txt" )
lowerCAmelCase = Dictionary.load(A__ )
lowerCAmelCase = rewrite_dict_keys(src_dict.indices )
lowerCAmelCase = len(A__ )
lowerCAmelCase = os.path.join(A__ , "vocab-src.json" )
print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records" )
with open(A__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(A__ , ensure_ascii=A__ , indent=A__ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowerCAmelCase = True
for k in src_vocab.keys():
if not k.islower():
lowerCAmelCase = False
break
lowerCAmelCase = Dictionary.load(A__ )
lowerCAmelCase = rewrite_dict_keys(tgt_dict.indices )
lowerCAmelCase = len(A__ )
lowerCAmelCase = os.path.join(A__ , "vocab-tgt.json" )
print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records" )
with open(A__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(A__ , ensure_ascii=A__ , indent=A__ ) )
# merges_file (bpecodes)
lowerCAmelCase = os.path.join(A__ , VOCAB_FILES_NAMES["merges_file"] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowerCAmelCase = os.path.join(A__ , A__ )
if os.path.exists(A__ ):
break
with open(A__ , encoding="utf-8" ) as fin:
lowerCAmelCase = fin.read()
lowerCAmelCase = re.sub(r" \d+$" , "" , A__ , 0 , re.M ) # remove frequency number
print(f"Generating {merges_file}" )
with open(A__ , "w" , encoding="utf-8" ) as fout:
fout.write(A__ )
# model config
lowerCAmelCase = os.path.join(A__ , "config.json" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
lowerCAmelCase = {
"architectures": ["FSMTForConditionalGeneration"],
"model_type": "fsmt",
"activation_dropout": args["activation_dropout"],
"activation_function": "relu",
"attention_dropout": args["attention_dropout"],
"d_model": args["decoder_embed_dim"],
"dropout": args["dropout"],
"init_std": 0.02,
"max_position_embeddings": args["max_source_positions"],
"num_hidden_layers": args["encoder_layers"],
"src_vocab_size": src_vocab_size,
"tgt_vocab_size": tgt_vocab_size,
"langs": [src_lang, tgt_lang],
"encoder_attention_heads": args["encoder_attention_heads"],
"encoder_ffn_dim": args["encoder_ffn_embed_dim"],
"encoder_layerdrop": args["encoder_layerdrop"],
"encoder_layers": args["encoder_layers"],
"decoder_attention_heads": args["decoder_attention_heads"],
"decoder_ffn_dim": args["decoder_ffn_embed_dim"],
"decoder_layerdrop": args["decoder_layerdrop"],
"decoder_layers": args["decoder_layers"],
"bos_token_id": 0,
"pad_token_id": 1,
"eos_token_id": 2,
"is_encoder_decoder": True,
"scale_embedding": not args["no_scale_embedding"],
"tie_word_embeddings": args["share_all_embeddings"],
}
# good hparam defaults to start with
lowerCAmelCase = 5
lowerCAmelCase = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowerCAmelCase = best_score_hparams[model_dir]["length_penalty"]
else:
lowerCAmelCase = 1.0
print(f"Generating {fsmt_model_config_file}" )
with open(A__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(A__ , ensure_ascii=A__ , indent=A__ ) )
# tokenizer config
lowerCAmelCase = os.path.join(A__ , A__ )
lowerCAmelCase = {
"langs": [src_lang, tgt_lang],
"model_max_length": 1024,
"do_lower_case": do_lower_case,
}
print(f"Generating {fsmt_tokenizer_config_file}" )
with open(A__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(A__ , ensure_ascii=A__ , indent=A__ ) )
# model
lowerCAmelCase = chkpt["models"][0]
lowerCAmelCase = model.state_dict()
# rename keys to start with 'model.'
lowerCAmelCase = OrderedDict(("model." + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowerCAmelCase = [
"model.model",
"model.encoder.version",
"model.decoder.version",
"model.encoder_embed_tokens.weight",
"model.decoder_embed_tokens.weight",
"model.encoder.embed_positions._float_tensor",
"model.decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
model_state_dict.pop(A__ , A__ )
lowerCAmelCase = FSMTConfig.from_pretrained(A__ )
lowerCAmelCase = FSMTForConditionalGeneration(A__ )
# check that it loads ok
model_new.load_state_dict(A__ , strict=A__ )
# save
lowerCAmelCase = os.path.join(A__ , A__ )
print(f"Generating {pytorch_weights_dump_path}" )
torch.save(A__ , A__ )
print("Conversion is done!" )
print("\nLast step is to upload the files to s3" )
print(f"cd {data_root}" )
print(f"transformers-cli upload {model_dir}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
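
    # A hedged sketch of consuming the converted dump (the upload hint above is
    # the original workflow; loading locally works as well):
    #
    #     from transformers import FSMTForConditionalGeneration, FSMTTokenizer
    #     tokenizer = FSMTTokenizer.from_pretrained(args.pytorch_dump_folder_path)
    #     model = FSMTForConditionalGeneration.from_pretrained(args.pytorch_dump_folder_path)
    #     batch = tokenizer(["Maschinelles Lernen ist großartig!"], return_tensors="pt")
    #     print(tokenizer.decode(model.generate(**batch)[0], skip_special_tokens=True))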
| 649 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 649 | 1 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
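
# A smoke-test sketch for one of the classes re-exported above; every shape and
# block choice below is a toy value of mine, not a library default.
def _demo_unet2d():
    import torch

    unet = UNet2DModel(
        sample_size=8,
        in_channels=3,
        out_channels=3,
        layers_per_block=1,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    noise = torch.randn(1, 3, 8, 8)
    return unet(noise, timestep=1).sample.shape  # torch.Size([1, 3, 8, 8])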
| 650 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__SCREAMING_SNAKE_CASE : List[str] = (OpenLlamaForCausalLM,) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : List[Any] = (
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
lowercase_ = OpenLlamaModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
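    # Hedged usage sketch (not part of the test suite): the `rope_scaling`
    # dictionary exercised above is a plain config attribute, so enabling it
    # outside the tests looks like this; the factor value is illustrative.
    #
    #     config = OpenLlamaConfig()
    #     config.rope_scaling = {"type": "linear", "factor": 10.0}
    #     model = OpenLlamaModel(config)  # positions past the trained window are interpolated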
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
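# Written out, the quantity computed by hubble_parameter is the standard
# Friedmann expansion rate (matching the e_2 expression above), in LaTeX:
#
#   H(z) = H_0 \sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda}
#
# with the curvature density fixed by \Omega_k = 1 - (\Omega_m + \Omega_r + \Omega_\Lambda).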
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "

DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def download_prompt(prompt_or_repo_id, agent_name, mode="run") -> str:
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=1, act_fn="silu", latent_channels=3, sample_size=32, num_vq_embeddings=256, norm_num_groups=32, vq_embed_dim=None, scaling_factor=0.18215, norm_type="group"):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type,
        )
    @apply_forward_hook
    def encode(self, x, return_dict=True):
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h, force_not_quantize=False, return_dict=True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample, return_dict=True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
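# Minimal round-trip sketch, assuming the default constructor arguments above
# (single 64-channel block, 3 latent channels). Kept as a comment because this
# module is normally imported, not executed:
#
#     import torch
#     vq = VQModel()
#     x = torch.randn(1, 3, 32, 32)
#     latents = vq.encode(x).latents      # continuous latents before quantization
#     recon = vq.decode(latents).sample   # quantized, then decoded back to pixels
#     assert recon.shape == x.shape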
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
UpperCAmelCase_ : Tuple = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
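FAIRSEQ_LANGUAGE_CODES = UpperCAmelCase_  # give the code list above its proper name so the tokenizer below can refer to it
# fmt: on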
class NllbTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def prepare_seq2seq_batch(self, src_texts, src_lang="eng_Latn", tgt_texts=None, tgt_lang="fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting.

        In legacy mode: no prefix, suffix = [eos, src_lang_code].
        In default mode: prefix = [src_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting.

        In legacy mode: no prefix, suffix = [eos, tgt_lang_code].
        In default mode: prefix = [tgt_lang_code], suffix = [eos].
        """
        self.cur_lang_code = self.lang_code_to_id[lang]
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
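# Hedged usage sketch (requires the checkpoint above; network access assumed):
#
#     tokenizer = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("The dog is cute.", return_tensors="pt")
#     # In the default (non-legacy) mode, input_ids = [eng_Latn code] + tokens + [</s>],
#     # matching set_src_lang_special_tokens above.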
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    output = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output
def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output
def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
    main()
def solution(n: int = 1000) -> int:
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
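
# Quick hand check that the update rule above walks the convergents of sqrt(2):
# 3/2 -> 7/5 -> 17/12 -> 41/29 -> ...
_p, _q = 1, 1
for _expected in [(3, 2), (7, 5), (17, 12), (41, 29)]:
    _p, _q = _p + 2 * _q, _p + _q
    assert (_p, _q) == _expected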
if __name__ == "__main__":
    print(f"{solution() = }")
__all__ = [
    "Audio",
    "Array2D",
    "Array3D",
    "Array4D",
    "Array5D",
    "ClassLabel",
    "Features",
    "Sequence",
    "Value",
    "Image",
    "Translation",
    "TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
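
if __name__ == "__main__":
    # Usage sketch for the feature types re-exported above (labels/values are illustrative):
    from datasets import ClassLabel, Dataset, Features, Value

    features = Features({"text": Value("string"), "label": ClassLabel(names=["neg", "pos"])})
    ds = Dataset.from_dict({"text": ["good movie", "bad movie"], "label": [1, 0]}, features=features)
    assert ds.features["label"].int2str(1) == "pos"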
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
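
# Hedged usage sketch of calling the converter directly; the checkpoint path is
# a placeholder for a downloaded PoolFormer .pth file:
#
#     convert_poolformer_checkpoint(
#         model_name="poolformer_s12",
#         checkpoint_path="/path/to/poolformer_s12.pth",
#         pytorch_dump_folder_path="./poolformer-s12-hf",
#     )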
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
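
    # Hedged sketch: producing the same hidden states with a tokenizer instead of
    # hard-coded ids (XLMRobertaTokenizer import assumed; requires Hub access):
    #
    #     tokenizer = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    #     model = XLMRobertaModel.from_pretrained("xlm-roberta-base").eval()
    #     inputs = tokenizer("The dog is cute and lives in the garden house", return_tensors="pt")
    #     with torch.no_grad():
    #         hidden = model(**inputs).last_hidden_state  # (batch, seq_len, 768)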
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")
    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()
    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swin_name''',
default='''swin_tiny_patch4_window7_224''',
type=str,
help='''Name of the Swin timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
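
# Hedged sketch of a direct call (the timm name must exist in timm's registry;
# the output directory is a placeholder):
#
#     convert_swin_checkpoint("swin_tiny_patch4_window7_224", "./swin-tiny-patch4-window7-224-hf")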
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )
        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
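
# Hedged sketch mirroring the integration test with a tokenizer (AlbertTokenizer
# import assumed; requires Hub access, hence left as a comment):
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     model = FlaxAlbertModel.from_pretrained("albert-base-v2")
#     inputs = tokenizer("Flax runs ALBERT too", return_tensors="np")
#     last_hidden = model(**inputs).last_hidden_state  # (1, seq_len, 768)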
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
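# Hedged self-check (not part of the original script): rename_key maps timm's
# refinenet indices 4..1 onto HF's fusion_stage.layers 0..3; this can be
# verified locally without downloading any weights.
def _demo_refinenet_mapping():
    for timm_idx, hf_idx in [(4, 0), (3, 1), (2, 2), (1, 3)]:
        renamed = rename_key(f"scratch.refinenet{timm_idx}.out_conv.weight")
        assert f"fusion_stage.layers.{hf_idx}" in renamed, renamed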
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 571 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wavlm"] = [
"""WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WavLMForAudioFrameClassification""",
"""WavLMForCTC""",
"""WavLMForSequenceClassification""",
"""WavLMForXVector""",
"""WavLMModel""",
"""WavLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
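# Hedged usage note (not part of the original __init__): with the lazy-module
# pattern above, importing the package is cheap and the torch-backed classes are
# only loaded on first attribute access, e.g.:
#
#     from transformers.models.wavlm import WavLMConfig  # config only
#     from transformers.models.wavlm import WavLMModel   # triggers the modeling import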
| 111 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
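# Hedged demo (not part of the original test file): prepare_mbart_inputs_dict derives
# attention masks from pad tokens; this can be checked on CPU without any weights.
def _demo_prepare_inputs():
    config = MBartConfig(vocab_size=99, pad_token_id=1)
    ids = tf.constant([[5, 6, 1]])
    batch = prepare_mbart_inputs_dict(config, ids, ids)
    assert batch["attention_mask"].numpy().tolist() == [[1, 1, 0]]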
| 111 | 1 |
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function elementwise.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Create a gaussian kernel of the given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # force an odd kernel size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
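# Hedged demo (not part of the original script): the filter accepts any float array,
# so it can be exercised without OpenCV or an image file.
def _demo_bilateral_filter():
    rng = np.random.default_rng(0)
    noisy = (0.5 + 0.05 * rng.standard_normal((16, 16))).astype("float32")
    smoothed = bilateral_filter(noisy, 1.0, 1.0, 5)
    # border pixels are left at zero, so compare the processed interior only
    assert smoothed[2:-2, 2:-2].std() <= noisy[2:-2, 2:-2].std()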
if __name__ == "__main__":
lowercase_ = parse_args(sys.argv)
lowercase_ = cva.imread(filename, 0)
cva.imshow('''input image''', img)
lowercase_ = img / 2_55
lowercase_ = out.astype('''float32''')
lowercase_ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowercase_ = out * 2_55
lowercase_ = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
cva.destroyAllWindows()
| 354 |
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal
class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb
class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
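# Hedged demo (not part of the original module): each timestep maps to a row of
# `embedding_dim` features, half sine and half cosine.
def _demo_sinusoidal_embeddings():
    out = get_sinusoidal_embeddings(jnp.array([0.0, 10.0, 100.0]), embedding_dim=8)
    assert out.shape == (3, 8)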
| 479 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''', add_special_tokens=A ), [0, 3_1414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''', add_special_tokens=A ), [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2], )
@slow
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.tokenizer_class.from_pretrained('''allenai/longformer-base-4096''' )
_UpperCamelCase = tokenizer.encode('''sequence builders''', add_special_tokens=A )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''', add_special_tokens=A )
_UpperCamelCase = tokenizer.encode(
'''sequence builders''', add_special_tokens=A, add_prefix_space=A )
_UpperCamelCase = tokenizer.encode(
'''sequence builders''', '''multi-sequence build''', add_special_tokens=A, add_prefix_space=A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(A, A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _UpperCamelCase ( self ):
"""simple docstring"""
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = '''Encode this sequence.'''
_UpperCamelCase = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
_UpperCamelCase = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A, A )
_UpperCamelCase = tokenizer.encode(A, add_special_tokens=A, add_prefix_space=A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A, A )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
_UpperCamelCase = tokenizer.encode(A, add_special_tokens=A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A, A )
# Testing spaces after special tokens
_UpperCamelCase = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(A, lstrip=A, rstrip=A )} ) # mask token has a left space
_UpperCamelCase = tokenizer.convert_tokens_to_ids(A )
_UpperCamelCase = '''Encode <mask> sequence'''
_UpperCamelCase = '''Encode <mask>sequence'''
_UpperCamelCase = tokenizer.encode(A )
_UpperCamelCase = encoded.index(A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A, A )
_UpperCamelCase = tokenizer.encode(A )
_UpperCamelCase = encoded.index(A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A, A )
def _UpperCamelCase ( self ):
"""simple docstring"""
pass
def _UpperCamelCase ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(A, **A )
_UpperCamelCase = self.tokenizer_class.from_pretrained(A, **A )
_UpperCamelCase = '''A, <mask> AllenNLP sentence.'''
_UpperCamelCase = tokenizer_r.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
_UpperCamelCase = tokenizer_p.encode_plus(A, add_special_tokens=A, return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ), sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ), sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ), )
_UpperCamelCase = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
_UpperCamelCase = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
A, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
A, ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _UpperCamelCase ( self ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2 ):
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname, use_fast=A, add_prefix_space=A, trim_offsets=A )
_UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
_UpperCamelCase = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''], A )
self.assertEqual(post_processor_state['''add_prefix_space'''], A )
self.assertEqual(post_processor_state['''trim_offsets'''], A )
def _UpperCamelCase ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCamelCase = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_UpperCamelCase = F'''{text_of_1_token} {text_of_1_token}'''
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
_UpperCamelCase = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
_UpperCamelCase = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ) + 1, len(A ) + 1 + len(A )), )
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
_UpperCamelCase = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
_UpperCamelCase = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (len(A ), len(A ) + 1 + len(A )), )
_UpperCamelCase = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
_UpperCamelCase = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )), )
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
_UpperCamelCase = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
_UpperCamelCase = self.rust_tokenizer_class.from_pretrained(
A, use_fast=A, add_prefix_space=A, trim_offsets=A )
_UpperCamelCase = tokenizer_r(A, return_offsets_mapping=A, add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(A ), 1 + len(A ) + 1 + len(A )), )
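# Hedged sketch (not part of the original tests): a minimal illustration of how a BPE
# merge rule like the toy "e r" entry above combines adjacent symbols in a word.
def _demo_bpe_merge():
    word = list("lower")
    for a, b in [("e", "r")]:  # simplified; the real merges also cover \u0120 rules
        i = 0
        while i < len(word) - 1:
            if (word[i], word[i + 1]) == (a, b):
                word[i : i + 2] = [a + b]
            else:
                i += 1
    assert word == ["l", "o", "w", "er"]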
| 105 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 105 | 1 |
from __future__ import annotations
from random import random
class Node:
    # Treap node: the structure is a binary search tree by value and a heap by priority.
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("goodbye!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 61 |
import collections
import inspect
import unittest
from transformers import Swinv2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
    from transformers import Swinv2ForImageClassification, Swinv2ForMaskedImageModeling, Swinv2Model
    from transformers.models.swinv2.modeling_swinv2 import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class Swinv2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return Swinv2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = Swinv2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = Swinv2ForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = Swinv2ForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = Swinv2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class Swinv2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (Swinv2Model, Swinv2ForImageClassification, Swinv2ForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": Swinv2Model, "image-classification": Swinv2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = Swinv2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Swinv2Config, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0." )
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="Swinv2 does not use inputs_embeds" )
def _lowerCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class(lowerCAmelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )
def _lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def _lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : int = True
for model_class in self.all_model_classes:
_UpperCAmelCase : str = True
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : int = True
_UpperCAmelCase : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : int = outputs.attentions
_UpperCAmelCase : Optional[int] = len(self.model_tester.depths )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Tuple = config.window_size**2
_UpperCAmelCase : str = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : int = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Tuple = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
_UpperCAmelCase : int = True
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Any = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
if hasattr(self.model_tester , "num_hidden_states_types" ):
_UpperCAmelCase : Optional[int] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
_UpperCAmelCase : Dict = 2
self.assertEqual(out_len + added_hidden_states , len(lowerCAmelCase__ ) )
_UpperCAmelCase : List[Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : List[str] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : str = outputs.hidden_states
_UpperCAmelCase : Union[str, Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# Swinv2 has a different seq_length
_UpperCAmelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_UpperCAmelCase : Dict = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = reshaped_hidden_states[0].shape
_UpperCAmelCase : List[Any] = (
reshaped_hidden_states[0].view(lowerCAmelCase__ , lowerCAmelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : int = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : str = 3
_UpperCAmelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Tuple = True
self.check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , (padded_height, padded_width) )
    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = Swinv2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def _lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Tuple = _config_zero_init(lowerCAmelCase__ )
for model_class in self.all_model_classes:
_UpperCAmelCase : List[Any] = model_class(config=lowerCAmelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
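# Hedged sketch (not part of the original tests): the expected sequence length used by
# Swinv2ModelTester.create_and_check_model comes from patchifying the image and then
# shrinking the token grid by a factor of 4 at every merge stage.
def _demo_expected_seq_len():
    image_size, patch_size, depths = 32, 2, [1, 2, 1]
    num_patches = (image_size // patch_size) ** 2       # 256 tokens after patch embedding
    expected = num_patches // (4 ** (len(depths) - 1))  # two merge stages divide by 4 each
    assert expected == 16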
@require_vision
@require_torch
class Swinv2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256").to(
            torch_device
        )
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3947, -0.4306, 0.0026]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 494 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"""You cannot provide bounding boxes """
"""if you initialized the image processor with apply_ocr set to True.""" )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"""You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("""You cannot return overflowing tokens without returning the offsets mapping.""" )
# first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
def _lowercase ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(
"""Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"""
F""" {len(__lowerCamelCase )} and {len(__lowerCamelCase )}""" )
return images_with_overflow
def _lowercase ( self : Dict , *__lowerCamelCase : Tuple , **__lowerCamelCase : Dict ) -> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowercase ( self : Optional[int] , *__lowerCamelCase : Any , **__lowerCamelCase : Dict ) -> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def _lowercase ( self : Optional[int] ) -> str:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __lowerCamelCase , )
return self.image_processor_class
@property
def _lowercase ( self : str ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __lowerCamelCase , )
return self.image_processor
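# Usage sketch (hedged): a minimal way to drive the processor above. The
# checkpoint name is an assumption for illustration; any LayoutLMv2-style
# checkpoint whose image processor runs OCR (apply_ocr=True) behaves alike.
#
# from PIL import Image
# from transformers import LayoutLMv2Processor
#
# processor = LayoutLMv2Processor.from_pretrained("microsoft/layoutlmv2-base-uncased")
# image = Image.open("document.png").convert("RGB")
# encoding = processor(image, return_tensors="pt")
# print(sorted(encoding.keys()))  # includes input_ids, bbox, attention_mask, image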
| 627 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
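# Usage sketch: instantiating the config above with one override; attribute
# lookups go through `attribute_map`.
#
# config = CTRLConfig(n_layer=12)
# print(config.hidden_size)        # 1280, resolved to n_embd
# print(config.num_hidden_layers)  # 12, resolved to n_layer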
| 627 | 1 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean algorithm."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative Euclidean algorithm."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main() -> None:
    """Read two comma-separated integers and print both GCD variants."""
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")


if __name__ == "__main__":
    main()
| 106 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImg2ImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
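# Usage sketch (hedged): loading the text-to-3D pipeline exported above; the
# checkpoint name, dtype and device are assumptions for illustration.
#
# import torch
# from diffusers import ShapEPipeline
#
# pipe = ShapEPipeline.from_pretrained("openai/shap-e", torch_dtype=torch.float16).to("cuda")
# images = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images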
| 169 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
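# Usage sketch: only the layout-specific knobs differ from a plain
# RoBERTa-style config; everything else keeps the defaults above.
#
# config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
# print(config.model_type)  # "lilt"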
| 710 |
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 80 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
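# Usage sketch: because of the `_LazyModule` indirection above, importing this
# package is cheap; the heavy torch/flax modeling files are only imported when
# a symbol is first touched, e.g.:
#
# from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderConfig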
| 476 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def snake_case ( A__ ):
def wrapper(*A__ ,**A__ ):
UpperCAmelCase_ : Union[str, Any] = timeit.default_timer()
UpperCAmelCase_ : Union[str, Any] = func(*A__ ,**A__ )
UpperCAmelCase_ : Dict = timeit.default_timer() - starttime
return delta
UpperCAmelCase_ : Optional[Any] = func.__name__
return wrapper
def snake_case ( A__ ,A__=1_00 ,A__=None ):
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[str] = seq_shapes or {}
for i in range(A__ ):
UpperCAmelCase_ : int = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(A__ ,_ArrayXD ):
UpperCAmelCase_ : Optional[int] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(A__ ,datasets.Value ):
if v.dtype == "string":
UpperCAmelCase_ : List[str] = "The small grey turtle was surprisingly fast when challenged."
else:
UpperCAmelCase_ : List[str] = np.random.randint(10 ,size=1 ).astype(v.dtype ).item()
elif isinstance(A__ ,datasets.Sequence ):
while isinstance(A__ ,datasets.Sequence ):
UpperCAmelCase_ : List[str] = v.feature
UpperCAmelCase_ : List[Any] = seq_shapes[k]
UpperCAmelCase_ : Dict = np.random.rand(*A__ ).astype(v.dtype )
UpperCAmelCase_ : Dict = data
dummy_data.append((i, example) )
return dummy_data
def snake_case ( A__ ,A__ ,A__=1_00 ,A__=None ):
UpperCAmelCase_ : Optional[Any] = generate_examples(A__ ,num_examples=A__ ,seq_shapes=A__ )
with ArrowWriter(features=A__ ,path=A__ ) as writer:
for key, record in dummy_data:
UpperCAmelCase_ : Any = features.encode_example(A__ )
writer.write(A__ )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
UpperCAmelCase_ : List[Any] = datasets.Dataset.from_file(filename=A__ ,info=datasets.DatasetInfo(features=A__ ) )
return dataset
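# Usage sketch: building a tiny dummy dataset with the helpers above. The
# feature spec and temporary path are assumptions for illustration.
#
# import os
# import tempfile
#
# features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
# with tempfile.TemporaryDirectory() as tmp_dir:
#     dataset = generate_example_dataset(os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=10)
#     print(len(dataset))  # 10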
| 95 | 0 |
from typing import Dict, List, Optional, Tuple, Union

import torch

from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DiTPipeline(DiffusionPipeline):
    def __init__(
        self,
        transformer: Transformer2DModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
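# Usage sketch (hedged): driving the class-conditional DiT pipeline above;
# the checkpoint name is an assumption for illustration.
#
# import torch
# from diffusers import DiTPipeline
#
# pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16).to("cuda")
# class_ids = pipe.get_label_ids(["golden retriever"])
# image = pipe(class_labels=class_ids, num_inference_steps=25).images[0]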
| 719 |
import torch
from torch import nn


class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster

                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
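# Usage sketch: scoring a toy batch with the adaptive softmax above; all
# sizes are made up for illustration.
#
# import torch
#
# crit = ProjectedAdaptiveLogSoftmax(n_token=1000, d_embed=64, d_proj=64, cutoffs=[100, 500])
# hidden = torch.randn(2, 8, 64)            # (batch, seq_len, d_proj)
# labels = torch.randint(0, 1000, (2, 8))   # next-token targets
# nll = crit(hidden, labels)                # flat tensor of per-position NLLs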
| 280 | 0 |
import unittest

from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
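# Usage sketch: this test class is collected by pytest from the transformers
# test suite; the exact path below is an assumption.
#
# python -m pytest tests/models/barthez/test_tokenization_barthez.py -q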
| 5 |
import argparse
from pathlib import Path

from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration


def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token"],
        required=True,
        type=str,
        help="RAG model type: rag_sequence, rag_token",
    )
    parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
    parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
    parser.add_argument(
        "--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
    )
    parser.add_argument(
        "--generator_tokenizer_name_or_path",
        type=str,
        help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
    )
    parser.add_argument(
        "--question_encoder_tokenizer_name_or_path",
        type=str,
        help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
    )
    parser.add_argument(
        "--config_name_or_path",
        type=str,
        help=(
            "Identifier of the model config to use, if not provided, resolves to a base config for a given"
            " ``model_type``"
        ),
    )

    args = parser.parse_args()

    dest_dir = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)

    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
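# Usage sketch: a typical invocation of this script (script name and model
# identifiers are assumptions for illustration):
#
# python consolidate_rag_checkpoint.py \
#     --model_type rag_sequence \
#     --generator_name_or_path facebook/bart-large-cnn \
#     --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#     --dest ./rag-checkpoint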
| 578 | 0 |
from math import log2


def lowest_set_bit_index(number: int) -> int:
    # `number & -number` isolates the lowest set bit; log2 gives its index
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif isinstance(number, float):
        raise TypeError("Input value must be a 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
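# Worked example of the bit trick: for number = 36 (0b100100), -number is its
# two's complement, so number & -number isolates the lowest set bit
# (0b100 = 4) and log2(4) = 2 gives that bit's index.
#
# assert lowest_set_bit_index(36) == 2
# assert lowest_set_bit_index(1) == 0
# assert lowest_set_bit_index(0) == 0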
| 720 |
import os

import torch

from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version


if is_torch_version(">=", FSDP_PYTORCH_VERSION):
    import torch.distributed.checkpoint as dist_cp
    from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
    from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
    from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
    from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType


logger = get_logger(__name__)


def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
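# Usage sketch: in practice these helpers are invoked for you by
# `Accelerator.save_state(...)` / `Accelerator.load_state(...)` when an FSDP
# plugin is active; a rough manual flow (all objects assumed to be already
# prepared by Accelerate) would be:
#
# fsdp_plugin = accelerator.state.fsdp_plugin
# save_fsdp_model(fsdp_plugin, accelerator, model, "ckpt_dir")
# save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt_dir")
# ...
# load_fsdp_model(fsdp_plugin, accelerator, model, "ckpt_dir")
# load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, "ckpt_dir")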
| 594 | 0 |
"""simple docstring"""
import argparse
import os
import re
_lowerCAmelCase = """src/transformers"""
# Pattern that looks at the indentation in a line.
_lowerCAmelCase = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_lowerCAmelCase = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_lowerCAmelCase = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_lowerCAmelCase = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_lowerCAmelCase = re.compile(r"""\[([^\]]+)\]""")
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase="" , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : Union[str, Any] = code.split('\n' )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[str] = ['\n'.join(lines[:index] )]
else:
_lowerCAmelCase : Dict = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : List[Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ' ' ):
current_block.append(lines[index] )
blocks.append('\n'.join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Optional[Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append('\n'.join(_lowerCamelCase ) )
_lowerCAmelCase : List[Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append('\n'.join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append('\n'.join(lines[index:] ) )
return blocks
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
def _inner(_lowerCamelCase ):
return key(_lowerCamelCase ).lower().replace('_' , '' )
return _inner
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
def noop(_lowerCamelCase ):
return x
if key is None:
_lowerCAmelCase : List[str] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : List[str] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : List[str] = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
_lowerCAmelCase : str = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
def _replace(_lowerCamelCase ):
_lowerCAmelCase : str = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
_lowerCAmelCase : Optional[Any] = [part.strip().replace('"' , '' ) for part in imports.split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : str = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(_lowerCamelCase )] ) + "]"
_lowerCAmelCase : List[Any] = import_statement.split('\n' )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Optional[Any] = 2 if lines[1].strip() == '[' else 1
_lowerCAmelCase : List[Any] = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : int = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )
_lowerCAmelCase : Dict = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Any = [part.strip().replace('"' , '' ) for part in lines[1].split(',' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : str = keys[:-1]
_lowerCAmelCase : int = get_indent(lines[1] ) + ', '.join([f"""\"{k}\"""" for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Optional[int] = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase=True ):
'''simple docstring'''
with open(_lowerCamelCase , encoding='utf-8' ) as f:
_lowerCAmelCase : int = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Union[str, Any] = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt='_import_structure = {' , end_prompt='if TYPE_CHECKING:' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : Any = main_blocks[block_idx]
_lowerCAmelCase : Tuple = block.split('\n' )
# Get to the start of the imports.
_lowerCAmelCase : Optional[Any] = 0
while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Optional[Any] = len(_lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : List[str] = '\n'.join(block_lines[line_idx:-1] )
_lowerCAmelCase : Optional[Any] = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
_lowerCAmelCase : Tuple = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : Dict = _re_direct_key if '_import_structure = {' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : Optional[int] = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : List[Any] = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None]
_lowerCAmelCase : Dict = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : str = 0
_lowerCAmelCase : int = []
for i in range(len(_lowerCamelCase ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : List[str] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(_lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : Optional[Any] = '\n'.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCamelCase ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(_lowerCamelCase ) )
def lowerCamelCase__ ( _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
_lowerCAmelCase : Optional[Any] = sort_imports(os.path.join(_lowerCamelCase , '__init__.py' ) , check_only=_lowerCamelCase )
if result:
_lowerCAmelCase : Optional[int] = [os.path.join(_lowerCamelCase , '__init__.py' )]
if len(_lowerCamelCase ) > 0:
raise ValueError(f"""Would overwrite {len(_lowerCamelCase )} files, run `make style`.""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_lowerCAmelCase = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
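# Usage sketch: run from the repository root (script path assumed to be
# utils/custom_init_isort.py); `--check_only` is what CI uses, the bare
# invocation rewrites offending __init__.py files in place.
#
# python utils/custom_init_isort.py --check_only
# python utils/custom_init_isort.py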
| 259 |
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase="pt" ):
'''simple docstring'''
_lowerCAmelCase : str = {'add_prefix_space': True} if isinstance(_lowerCamelCase , _lowerCamelCase ) and not line.startswith(' ' ) else {}
_lowerCAmelCase : List[str] = padding_side
return tokenizer(
[line] , max_length=_lowerCamelCase , padding='max_length' if pad_to_max_length else None , truncation=_lowerCamelCase , return_tensors=_lowerCamelCase , add_special_tokens=_lowerCamelCase , **_lowerCamelCase , )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , ):
'''simple docstring'''
_lowerCAmelCase : str = input_ids.ne(_lowerCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class __UpperCamelCase ( a__ ):
def __init__( self ,_A ,_A ,_A ,_A ,_A="train" ,_A=None ,_A=None ,_A=None ,_A="" ,):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Any = Path(_A ).joinpath(type_path + '.source' )
_lowerCAmelCase : Optional[int] = Path(_A ).joinpath(type_path + '.target' )
_lowerCAmelCase : List[Any] = self.get_char_lens(self.src_file )
_lowerCAmelCase : Tuple = max_source_length
_lowerCAmelCase : Union[str, Any] = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
_lowerCAmelCase : Dict = tokenizer
_lowerCAmelCase : List[Any] = prefix
if n_obs is not None:
_lowerCAmelCase : int = self.src_lens[:n_obs]
_lowerCAmelCase : List[str] = src_lang
_lowerCAmelCase : Any = tgt_lang
def __len__( self ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = index + 1 # linecache starts at 1
_lowerCAmelCase : Optional[int] = self.prefix + linecache.getline(str(self.src_file ) ,_A ).rstrip('\n' )
_lowerCAmelCase : Optional[int] = linecache.getline(str(self.tgt_file ) ,_A ).rstrip('\n' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_A ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_lowerCAmelCase : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_A ) else self.tokenizer
)
_lowerCAmelCase : Dict = self.tokenizer.generator if isinstance(self.tokenizer ,_A ) else self.tokenizer
_lowerCAmelCase : Union[str, Any] = encode_line(_A ,_A ,self.max_source_length ,'right' )
_lowerCAmelCase : Optional[int] = encode_line(_A ,_A ,self.max_target_length ,'right' )
_lowerCAmelCase : Tuple = source_inputs['input_ids'].squeeze()
_lowerCAmelCase : int = target_inputs['input_ids'].squeeze()
_lowerCAmelCase : Optional[Any] = source_inputs['attention_mask'].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
return [len(_A ) for x in Path(_A ).open().readlines()]
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = torch.stack([x['input_ids'] for x in batch] )
_lowerCAmelCase : List[Any] = torch.stack([x['attention_mask'] for x in batch] )
_lowerCAmelCase : Any = torch.stack([x['decoder_input_ids'] for x in batch] )
_lowerCAmelCase : List[str] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_A )
else self.tokenizer.pad_token_id
)
_lowerCAmelCase : List[str] = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_A )
else self.tokenizer.pad_token_id
)
_lowerCAmelCase : int = trim_batch(_A ,_A )
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = trim_batch(_A ,_A ,attention_mask=_A )
_lowerCAmelCase : List[str] = {
'input_ids': source_ids,
'attention_mask': source_mask,
'decoder_input_ids': y,
}
return batch
_lowerCAmelCase = getLogger(__name__)
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return list(itertools.chain.from_iterable(_lowerCamelCase ) )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = get_git_info()
save_json(_lowerCamelCase , os.path.join(_lowerCamelCase , 'git_log.json' ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=4 , **_lowerCamelCase ):
'''simple docstring'''
with open(_lowerCamelCase , 'w' ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=_lowerCamelCase , **_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
with open(_lowerCamelCase ) as f:
return json.load(_lowerCamelCase )
def lowerCamelCase__ ( ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = git.Repo(search_parent_directories=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = {
'repo_id': str(_lowerCamelCase ),
'repo_sha': str(repo.head.object.hexsha ),
'repo_branch': str(repo.active_branch ),
'hostname': str(socket.gethostname() ),
}
return repo_infos
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return list(map(_lowerCamelCase , _lowerCamelCase ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
with open(_lowerCamelCase , 'wb' ) as f:
return pickle.dump(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
def remove_articles(_lowerCamelCase ):
return re.sub(R'\b(a|an|the)\b' , ' ' , _lowerCamelCase )
def white_space_fix(_lowerCamelCase ):
return " ".join(text.split() )
def remove_punc(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(_lowerCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(_lowerCamelCase ) ) ) )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = normalize_answer(_lowerCamelCase ).split()
_lowerCAmelCase : List[str] = normalize_answer(_lowerCamelCase ).split()
_lowerCAmelCase : Optional[Any] = Counter(_lowerCamelCase ) & Counter(_lowerCamelCase )
_lowerCAmelCase : List[Any] = sum(common.values() )
if num_same == 0:
return 0
_lowerCAmelCase : Any = 1.0 * num_same / len(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = 1.0 * num_same / len(_lowerCamelCase )
_lowerCAmelCase : str = (2 * precision * recall) / (precision + recall)
return fa
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return normalize_answer(_lowerCamelCase ) == normalize_answer(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert len(_lowerCamelCase ) == len(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = 0
for hypo, pred in zip(_lowerCamelCase , _lowerCamelCase ):
em += exact_match_score(_lowerCamelCase , _lowerCamelCase )
if len(_lowerCamelCase ) > 0:
em /= len(_lowerCamelCase )
return {"em": em}
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return model_prefix.startswith('rag' )
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_lowerCAmelCase : List[str] = 'dropout_rate'
for p in extra_params:
if getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
if not hasattr(_lowerCamelCase , _lowerCamelCase ) and not hasattr(_lowerCamelCase , equivalent_param[p] ):
logger.info('config doesn\'t have a `{}` attribute'.format(_lowerCamelCase ) )
delattr(_lowerCamelCase , _lowerCamelCase )
continue
_lowerCAmelCase : Optional[Any] = p if hasattr(_lowerCamelCase , _lowerCamelCase ) else equivalent_param[p]
setattr(_lowerCamelCase , _lowerCamelCase , getattr(_lowerCamelCase , _lowerCamelCase ) )
delattr(_lowerCamelCase , _lowerCamelCase )
return hparams, config
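# Hedged smoke test for the metric helpers above. The original function names in
# this file were rewritten, so the checks below re-derive the two core steps by
# hand instead of calling them: SQuAD-style answer normalization, and token-overlap F1.
if __name__ == "__main__":
    import re as _re
    import string as _string

    def _demo_normalize(text):
        text = text.lower()
        text = "".join(ch for ch in text if ch not in set(_string.punctuation))
        text = _re.sub(r"\b(a|an|the)\b", " ", text)
        return " ".join(text.split())

    assert _demo_normalize("The  Eiffel Tower!") == "eiffel tower"

    # F1 on a toy pair: 2 shared tokens, precision 2/3, recall 1, F1 = 0.8
    _pred, _gold = "green quick fox".split(), "quick fox".split()
    _common = len(set(_pred) & set(_gold))
    _p, _r = _common / len(_pred), _common / len(_gold)
    assert abs(2 * _p * _r / (_p + _r) - 0.8) < 1e-9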
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells whereas 1s are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            # Each frontier chases the other's most recently expanded node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True , __lowerCAmelCase="pt" ) -> List[Any]:
"""simple docstring"""
snake_case__ : List[Any] = {'''add_prefix_space''': True} if isinstance(__lowerCAmelCase , __lowerCAmelCase ) and not line.startswith(''' ''' ) else {}
snake_case__ : int = padding_side
return tokenizer(
[line] , max_length=__lowerCAmelCase , padding='''max_length''' if pad_to_max_length else None , truncation=__lowerCAmelCase , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , **__lowerCAmelCase , )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , ) -> int:
"""simple docstring"""
snake_case__ : Tuple = input_ids.ne(__lowerCAmelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
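# Quick sanity sketch of the trimming above (0 stands in for pad_token_id;
# columns that are pad-only across the whole batch are dropped):
#     >>> ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#     >>> ids[:, ids.ne(0).any(dim=0)]
#     tensor([[5, 6],
#             [7, 0]])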
class a ( __lowerCamelCase ):
def __init__( self :str ,__lowercase :List[Any] ,__lowercase :Optional[int] ,__lowercase :str ,__lowercase :List[Any] ,__lowercase :Union[str, Any]="train" ,__lowercase :Any=None ,__lowercase :List[str]=None ,__lowercase :Any=None ,__lowercase :Optional[Any]="" ,):
super().__init__()
snake_case__ : Dict = Path(__lowercase ).joinpath(type_path + '''.source''' )
snake_case__ : List[Any] = Path(__lowercase ).joinpath(type_path + '''.target''' )
snake_case__ : List[Any] = self.get_char_lens(self.src_file )
snake_case__ : List[str] = max_source_length
snake_case__ : str = max_target_length
assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
snake_case__ : Any = tokenizer
snake_case__ : int = prefix
if n_obs is not None:
snake_case__ : Tuple = self.src_lens[:n_obs]
snake_case__ : Optional[int] = src_lang
snake_case__ : int = tgt_lang
def __len__( self :str ):
return len(self.src_lens )
def __getitem__( self :Tuple ,__lowercase :List[str] ):
snake_case__ : Optional[Any] = index + 1 # linecache starts at 1
snake_case__ : Dict = self.prefix + linecache.getline(str(self.src_file ) ,__lowercase ).rstrip('''\n''' )
snake_case__ : List[str] = linecache.getline(str(self.tgt_file ) ,__lowercase ).rstrip('''\n''' )
assert source_line, F"""empty source line for index {index}"""
assert tgt_line, F"""empty tgt line for index {index}"""
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,__lowercase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
snake_case__ : Union[str, Any] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,__lowercase ) else self.tokenizer
)
snake_case__ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,__lowercase ) else self.tokenizer
snake_case__ : List[Any] = encode_line(__lowercase ,__lowercase ,self.max_source_length ,'''right''' )
snake_case__ : Any = encode_line(__lowercase ,__lowercase ,self.max_target_length ,'''right''' )
snake_case__ : Optional[int] = source_inputs['''input_ids'''].squeeze()
snake_case__ : Optional[Any] = target_inputs['''input_ids'''].squeeze()
snake_case__ : Optional[int] = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def __lowerCamelCase ( __lowercase :List[Any] ):
return [len(__lowercase ) for x in Path(__lowercase ).open().readlines()]
def __lowerCamelCase ( self :Union[str, Any] ,__lowercase :int ):
snake_case__ : List[str] = torch.stack([x['''input_ids'''] for x in batch] )
snake_case__ : Any = torch.stack([x['''attention_mask'''] for x in batch] )
snake_case__ : List[Any] = torch.stack([x['''decoder_input_ids'''] for x in batch] )
snake_case__ : Any = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,__lowercase )
else self.tokenizer.pad_token_id
)
snake_case__ : Any = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,__lowercase )
else self.tokenizer.pad_token_id
)
snake_case__ : List[Any] = trim_batch(__lowercase ,__lowercase )
snake_case__ , snake_case__ : int = trim_batch(__lowercase ,__lowercase ,attention_mask=__lowercase )
snake_case__ : Union[str, Any] = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
A__ = getLogger(__name__)
def _lowerCAmelCase ( __lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
return list(itertools.chain.from_iterable(__lowerCAmelCase ) )
def _lowerCAmelCase ( __lowerCAmelCase ) -> None:
"""simple docstring"""
snake_case__ : Tuple = get_git_info()
save_json(__lowerCAmelCase , os.path.join(__lowerCAmelCase , '''git_log.json''' ) )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=4 , **__lowerCAmelCase ) -> Dict:
"""simple docstring"""
with open(__lowerCAmelCase , '''w''' ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase , indent=__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCAmelCase ( __lowerCAmelCase ) -> List[str]:
"""simple docstring"""
with open(__lowerCAmelCase ) as f:
return json.load(__lowerCAmelCase )
def _lowerCAmelCase ( ) -> List[str]:
"""simple docstring"""
snake_case__ : Union[str, Any] = git.Repo(search_parent_directories=True )
snake_case__ : Optional[Any] = {
'''repo_id''': str(__lowerCAmelCase ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> List:
"""simple docstring"""
return list(map(__lowerCAmelCase , __lowerCAmelCase ) )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
with open(__lowerCAmelCase , '''wb''' ) as f:
return pickle.dump(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase ( __lowerCAmelCase ) -> Tuple:
"""simple docstring"""
def remove_articles(__lowerCAmelCase ):
return re.sub(r'''\b(a|an|the)\b''' , ''' ''' , __lowerCAmelCase )
def white_space_fix(__lowerCAmelCase ):
return " ".join(text.split() )
def remove_punc(__lowerCAmelCase ):
snake_case__ : Tuple = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCAmelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCAmelCase ) ) ) )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Dict = normalize_answer(__lowerCAmelCase ).split()
snake_case__ : Dict = normalize_answer(__lowerCAmelCase ).split()
snake_case__ : int = Counter(__lowerCAmelCase ) & Counter(__lowerCAmelCase )
snake_case__ : Union[str, Any] = sum(common.values() )
if num_same == 0:
return 0
snake_case__ : List[str] = 1.0 * num_same / len(__lowerCAmelCase )
snake_case__ : Dict = 1.0 * num_same / len(__lowerCAmelCase )
snake_case__ : Union[str, Any] = (2 * precision * recall) / (precision + recall)
return fa
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Any:
"""simple docstring"""
return normalize_answer(__lowerCAmelCase ) == normalize_answer(__lowerCAmelCase )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase ) -> Dict:
"""simple docstring"""
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
snake_case__ : Optional[int] = 0
for hypo, pred in zip(__lowerCAmelCase , __lowerCAmelCase ):
em += exact_match_score(__lowerCAmelCase , __lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
em /= len(__lowerCAmelCase )
return {"em": em}
def _lowerCAmelCase ( __lowerCAmelCase ) -> Union[str, Any]:
"""simple docstring"""
return model_prefix.startswith('''rag''' )
def _lowerCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
"""simple docstring"""
snake_case__ : str = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
snake_case__ : Union[str, Any] = '''dropout_rate'''
for p in extra_params:
if getattr(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if not hasattr(__lowerCAmelCase , __lowerCAmelCase ) and not hasattr(__lowerCAmelCase , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(__lowerCAmelCase ) )
delattr(__lowerCAmelCase , __lowerCAmelCase )
continue
snake_case__ : Optional[Any] = p if hasattr(__lowerCAmelCase , __lowerCAmelCase ) else equivalent_param[p]
setattr(__lowerCAmelCase , __lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
delattr(__lowerCAmelCase , __lowerCAmelCase )
return hparams, config
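# Minimal runnable sketch of the `dropout` -> `dropout_rate` remapping performed
# above, using throwaway namespaces in place of real hparams/config objects
# (all names below are illustrative):
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_hparams = SimpleNamespace(dropout=0.1)
    demo_config = SimpleNamespace(dropout_rate=0.0)  # T5-style: no `dropout` attr
    if not hasattr(demo_config, "dropout") and hasattr(demo_config, "dropout_rate"):
        demo_config.dropout_rate = demo_hparams.dropout
    assert demo_config.dropout_rate == 0.1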
"""Kahn's algorithm: topologically sort a directed acyclic graph."""


def topological_sort(graph: dict) -> None:
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__a = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""simple docstring"""
_snake_case : Optional[int] = 'Alexander Joslin'
import operator as op
from .stack import Stack
def A__ ( UpperCamelCase ):
A = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
A = Stack()
A = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(UpperCamelCase ) )
elif i in operators:
# RULE 2
operator_stack.push(UpperCamelCase )
elif i == ")":
# RULE 4
A = operator_stack.peek()
operator_stack.pop()
A = operand_stack.peek()
operand_stack.pop()
A = operand_stack.peek()
operand_stack.pop()
A = operators[opr](UpperCamelCase , UpperCamelCase )
operand_stack.push(UpperCamelCase )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
_snake_case : int = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
"""simple docstring"""
from itertools import permutations
def A__ ( UpperCamelCase ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
A = [7, 11, 13, 17]
for i, test in enumerate(UpperCamelCase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def A__ ( UpperCamelCase = 10 ):
return sum(
int("".join(map(UpperCamelCase , UpperCamelCase ) ) )
for num in permutations(range(UpperCamelCase ) )
if is_substring_divisible(UpperCamelCase ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
"""Backtracking search for a Hamiltonian cycle: a closed path that visits
every vertex of the graph exactly once."""


def valid_connection(
    graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]
) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
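# Illustrative run of the search above on a 4-cycle (the adjacency matrix is a
# made-up example for this sketch, not part of the original file):
if __name__ == "__main__":
    demo_graph = [
        [0, 1, 0, 1],
        [1, 0, 1, 0],
        [0, 1, 0, 1],
        [1, 0, 1, 0],
    ]
    print(hamilton_cycle(demo_graph))  # [0, 1, 2, 3, 0]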
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__lowerCAmelCase = True
except (ImportError, ModuleNotFoundError):
__lowerCAmelCase = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char (re.sub returns a new string, so reassign)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
__A = (
"""https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"""
)
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
def __A () ->List[str]:
"""simple docstring"""
lowerCAmelCase__ :str = 'https://pypi.org/pypi/diffusers/json'
lowerCAmelCase__ :Optional[int] = json.loads(request.urlopen(_SCREAMING_SNAKE_CASE ).read() )['releases'].keys()
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : version.Version(_SCREAMING_SNAKE_CASE ) )
def __A () ->List[str]:
"""simple docstring"""
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_SCREAMING_SNAKE_CASE )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :int = Path(_SCREAMING_SNAKE_CASE ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
init_hf_modules()
lowerCAmelCase__ :Optional[int] = Path(_SCREAMING_SNAKE_CASE ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Dict = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def __A (_SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
lowerCAmelCase__ :Union[str, Any] = f.read()
# Imports of the form `import .xxx`
lowerCAmelCase__ :Tuple = re.findall(r'^\s*import\s+\.(\S+)\s*$' , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall(r'^\s*from\s+\.(\S+)\s+import' , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Unique-ify
return list(set(_SCREAMING_SNAKE_CASE ) )
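# Illustrative behaviour of the two patterns above, on made-up file contents:
#     >>> src = "import .utils\nfrom .pipeline_helpers import load\n"
#     >>> found = re.findall(r"^\s*import\s+\.(\S+)\s*$", src, flags=re.MULTILINE)
#     >>> found += re.findall(r"^\s*from\s+\.(\S+)\s+import", src, flags=re.MULTILINE)
#     >>> sorted(found)
#     ['pipeline_helpers', 'utils']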
def __A (_SCREAMING_SNAKE_CASE ) ->Tuple:
"""simple docstring"""
lowerCAmelCase__ :Tuple = False
lowerCAmelCase__ :List[str] = [module_file]
lowerCAmelCase__ :Union[str, Any] = []
# Let's recurse through all relative imports
while not no_change:
lowerCAmelCase__ :Dict = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ :int = Path(_SCREAMING_SNAKE_CASE ).parent
lowerCAmelCase__ :Optional[Any] = [str(module_path / m ) for m in new_imports]
lowerCAmelCase__ :Optional[Any] = [f for f in new_import_files if f not in all_relative_imports]
lowerCAmelCase__ :Any = [F"{f}.py" for f in new_import_files]
lowerCAmelCase__ :List[str] = len(_SCREAMING_SNAKE_CASE ) == 0
all_relative_imports.extend(_SCREAMING_SNAKE_CASE )
return all_relative_imports
def __A (_SCREAMING_SNAKE_CASE ) ->int:
"""simple docstring"""
with open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f:
lowerCAmelCase__ :Tuple = f.read()
# Imports of the form `import xxx`
lowerCAmelCase__ :List[Any] = re.findall(r'^\s*import\s+(\S+)\s*$' , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall(r'^\s*from\s+(\S+)\s+import' , _SCREAMING_SNAKE_CASE , flags=re.MULTILINE )
# Only keep the top-level module
lowerCAmelCase__ :Optional[int] = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
lowerCAmelCase__ :str = list(set(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ :List[str] = []
for imp in imports:
try:
importlib.import_module(_SCREAMING_SNAKE_CASE )
except ImportError:
missing_packages.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F"{', '.join(_SCREAMING_SNAKE_CASE )}. Run `pip install {' '.join(_SCREAMING_SNAKE_CASE )}`" )
return get_relative_imports(_SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :Union[str, Any] = module_path.replace(os.path.sep , '.' )
lowerCAmelCase__ :int = importlib.import_module(_SCREAMING_SNAKE_CASE )
if class_name is None:
return find_pipeline_class(_SCREAMING_SNAKE_CASE )
return getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE ) ->List[str]:
"""simple docstring"""
from ..pipelines import DiffusionPipeline
lowerCAmelCase__ :Any = dict(inspect.getmembers(_SCREAMING_SNAKE_CASE , inspect.isclass ) )
lowerCAmelCase__ :List[Any] = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _SCREAMING_SNAKE_CASE )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
F" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
F" {loaded_module}." )
lowerCAmelCase__ :Optional[Any] = cls
return pipeline_class
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ) ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ :Optional[int] = str(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = module_file_or_url
lowerCAmelCase__ :List[Any] = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
lowerCAmelCase__ :Dict = get_diffusers_versions()
# cut ".dev0"
lowerCAmelCase__ :Optional[int] = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
lowerCAmelCase__ :Any = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F"Defaulting to latest_version: {revision}." )
elif revision in available_versions:
lowerCAmelCase__ :Optional[int] = F"v{revision}"
elif revision == "main":
lowerCAmelCase__ :Union[str, Any] = revision
else:
raise ValueError(
F"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
F" {', '.join(available_versions + ['main'] )}." )
# community pipeline on GitHub
lowerCAmelCase__ :Dict = COMMUNITY_PIPELINES_URL.format(revision=_SCREAMING_SNAKE_CASE , pipeline=_SCREAMING_SNAKE_CASE )
try:
lowerCAmelCase__ :List[str] = cached_download(
_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ :Any = 'git'
lowerCAmelCase__ :str = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
else:
try:
# Load from URL or cache if already cached
lowerCAmelCase__ :Optional[int] = hf_hub_download(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ :Union[str, Any] = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F"Could not locate the {module_file} inside {pretrained_model_name_or_path}." )
raise
# Check we have all the requirements in our environment
lowerCAmelCase__ :List[str] = check_imports(_SCREAMING_SNAKE_CASE )
# Now we move the module inside our cached dynamic modules.
lowerCAmelCase__ :str = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :Tuple = Path(_SCREAMING_SNAKE_CASE ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file )
for module_needed in modules_needed:
lowerCAmelCase__ :Union[str, Any] = F"{module_needed}.py"
shutil.copy(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowerCAmelCase__ :str = use_auth_token
elif use_auth_token is True:
lowerCAmelCase__ :Optional[int] = HfFolder.get_token()
else:
lowerCAmelCase__ :Dict = None
lowerCAmelCase__ :Union[str, Any] = model_info(_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
lowerCAmelCase__ :Tuple = submodule_path / commit_hash
lowerCAmelCase__ :List[str] = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_SCREAMING_SNAKE_CASE )
if not (submodule_path / module_file).exists():
shutil.copy(_SCREAMING_SNAKE_CASE , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_SCREAMING_SNAKE_CASE , F"{module_needed}.py" , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
return os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , **_SCREAMING_SNAKE_CASE , ) ->List[Any]:
"""simple docstring"""
lowerCAmelCase__ :int = get_cached_module_file(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , resume_download=_SCREAMING_SNAKE_CASE , proxies=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , local_files_only=_SCREAMING_SNAKE_CASE , )
return get_class_in_module(_SCREAMING_SNAKE_CASE , final_module.replace('.py' , '' ) )
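# Hedged usage sketch for the loader above (`get_class_from_dynamic_module` is
# the presumed original name; the repo id, file name and class name below are
# illustrative placeholders, not real resources):
#
#     pipeline_cls = get_class_from_dynamic_module(
#         "some-user/custom-pipeline-repo",
#         "pipeline.py",
#         class_name="MyCustomPipeline",
#     )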
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class _lowerCAmelCase ( a ):
"""simple docstring"""
__magic_name__ :List[str] = """SpeechT5FeatureExtractor"""
__magic_name__ :List[Any] = """SpeechT5Tokenizer"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase , __UpperCAmelCase )
def __call__( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = kwargs.pop('audio' , __UpperCAmelCase )
lowerCAmelCase__ :int = kwargs.pop('text' , __UpperCAmelCase )
lowerCAmelCase__ :Any = kwargs.pop('text_target' , __UpperCAmelCase )
lowerCAmelCase__ :int = kwargs.pop('audio_target' , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = kwargs.pop('sampling_rate' , __UpperCAmelCase )
if audio is not None and text is not None:
raise ValueError(
'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?' )
if audio_target is not None and text_target is not None:
raise ValueError(
'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.' )
if audio is not None:
lowerCAmelCase__ :List[str] = self.feature_extractor(__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
elif text is not None:
lowerCAmelCase__ :str = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
else:
lowerCAmelCase__ :Any = None
if audio_target is not None:
lowerCAmelCase__ :int = self.feature_extractor(audio_target=__UpperCAmelCase , *__UpperCAmelCase , sampling_rate=__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :int = targets['input_values']
elif text_target is not None:
lowerCAmelCase__ :Optional[int] = self.tokenizer(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Dict = targets['input_ids']
else:
lowerCAmelCase__ :Dict = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase__ :Union[str, Any] = labels
lowerCAmelCase__ :Dict = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase__ :Dict = decoder_attention_mask
return inputs
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Any = kwargs.pop('input_values' , __UpperCAmelCase )
lowerCAmelCase__ :List[Any] = kwargs.pop('input_ids' , __UpperCAmelCase )
lowerCAmelCase__ :Any = kwargs.pop('labels' , __UpperCAmelCase )
if input_values is not None and input_ids is not None:
raise ValueError('Cannot process both `input_values` and `input_ids` inputs.' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.' )
if input_values is not None:
lowerCAmelCase__ :Union[str, Any] = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
elif input_ids is not None:
lowerCAmelCase__ :Optional[int] = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase )
else:
lowerCAmelCase__ :int = None
if labels is not None:
if "input_ids" in labels or (isinstance(__UpperCAmelCase , __UpperCAmelCase ) and "input_ids" in labels[0]):
lowerCAmelCase__ :List[str] = self.tokenizer.pad(__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :Optional[Any] = targets['input_ids']
else:
lowerCAmelCase__ :Optional[int] = self.feature_extractor.feature_size
lowerCAmelCase__ :int = self.feature_extractor.num_mel_bins
lowerCAmelCase__ :Dict = self.feature_extractor.pad(__UpperCAmelCase , *__UpperCAmelCase , **__UpperCAmelCase )
lowerCAmelCase__ :List[Any] = feature_size_hack
lowerCAmelCase__ :str = targets['input_values']
else:
lowerCAmelCase__ :Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
lowerCAmelCase__ :Union[str, Any] = labels
lowerCAmelCase__ :List[Any] = targets.get('attention_mask' )
if decoder_attention_mask is not None:
lowerCAmelCase__ :Tuple = decoder_attention_mask
return inputs
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCAmelCase , **__UpperCAmelCase )
def snake_case ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*__UpperCAmelCase , **__UpperCAmelCase )
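# Hedged usage sketch for the processor above, which wires a
# SpeechT5FeatureExtractor to a SpeechT5Tokenizer (checkpoint id and kwargs
# are illustrative):
#
#     from transformers import SpeechT5Processor
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#     inputs = processor(text="Hello world.", return_tensors="pt")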
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[Any] = {
'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='markuplm'
def __init__( self : Dict , __a : Optional[int]=3_05_22 , __a : str=7_68 , __a : List[Any]=12 , __a : Dict=12 , __a : Tuple=30_72 , __a : str="gelu" , __a : Optional[Any]=0.1 , __a : Optional[int]=0.1 , __a : int=5_12 , __a : Optional[int]=2 , __a : str=0.02 , __a : int=1e-1_2 , __a : int=0 , __a : Dict=0 , __a : List[str]=2 , __a : Any=2_56 , __a : Any=10_24 , __a : int=2_16 , __a : str=10_01 , __a : Optional[Any]=32 , __a : Tuple=50 , __a : str="absolute" , __a : Union[str, Any]=True , __a : str=None , **__a : Tuple , ):
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a , )
_a = vocab_size
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = hidden_act
_a = intermediate_size
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = max_position_embeddings
_a = type_vocab_size
_a = initializer_range
_a = layer_norm_eps
_a = position_embedding_type
_a = use_cache
_a = classifier_dropout
# additional properties
_a = max_depth
_a = max_xpath_tag_unit_embeddings
_a = max_xpath_subs_unit_embeddings
_a = tag_pad_id
_a = subs_pad_id
_a = xpath_unit_hidden_size
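# Hedged usage sketch (`MarkupLMConfig` is the presumed original class name,
# given `model_type = "markuplm"` above; the attribute values checked below
# mirror defaults of the public MarkupLM implementation and are illustrative):
#
#     config = MarkupLMConfig()
#     assert config.max_depth == 50 and config.xpath_unit_hidden_size == 32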
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase__ ( UpperCAmelCase ):
"""simple docstring"""
def __init__( self , snake_case , snake_case ):
'''simple docstring'''
super().__init__()
self.register_modules(unet=snake_case , scheduler=snake_case )
@torch.no_grad()
def __call__( self , snake_case = 1 , snake_case = None , snake_case = 50 , snake_case = "pil" , snake_case = True , **snake_case , ):
'''simple docstring'''
UpperCamelCase__ = torch.randn(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=snake_case , )
UpperCamelCase__ = image.to(self.device )
# set step values
self.scheduler.set_timesteps(snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCamelCase__ = self.unet(snake_case , snake_case ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(snake_case , snake_case , snake_case ).prev_sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
UpperCamelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(snake_case )
if not return_dict:
return (image,), "This is a local test"
return ImagePipelineOutput(images=snake_case ), "This is a local test"
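# Hedged usage sketch (the pipeline class above had its original name
# rewritten; "DDPMLikePipeline" and the checkpoint id are placeholders):
#
#     pipe = DDPMLikePipeline.from_pretrained("some-org/unconditional-checkpoint")
#     images = pipe(batch_size=1, num_inference_steps=50).images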
"""Selection sort: repeatedly move the minimum of the unsorted suffix to the front."""


def selection_sort(collection: list) -> list:
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
A: List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
A: Union[str, Any] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"A red cartoon frog, 4k\"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16\n ... )\n >>> pipe.to(\"cuda\")\n\n >>> init_image = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/frog.png\"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save(\"red_frog.png\")\n ```\n"
def _UpperCAmelCase ( a : Tuple , a : Union[str, Any] , a : List[Any]=8 ) -> Dict:
"""simple docstring"""
lowercase_ : List[Any] = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase_ : List[str] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _UpperCAmelCase ( a : Any , a : Dict=5_1_2 , a : Optional[Any]=5_1_2 ) -> Tuple:
"""simple docstring"""
lowercase_ : int = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
lowercase_ : int = np.array(pil_image.convert('RGB' ) )
lowercase_ : Optional[int] = arr.astype(np.float32 ) / 127.5 - 1
lowercase_ : Any = np.transpose(a , [2, 0, 1] )
lowercase_ : Any = torch.from_numpy(a ).unsqueeze(0 )
return image
class __magic_name__ ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self , _lowercase , _lowercase , _lowercase , ) -> List[Any]:
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase_ : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> int:
# get the original timestep using init_timestep
lowercase_ : List[Any] = min(int(num_inference_steps * strength ) , _lowercase )
lowercase_ : Tuple = max(num_inference_steps - init_timestep , 0 )
lowercase_ : Optional[Any] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCamelCase__ ( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=None ) -> Any:
if not isinstance(_lowercase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_lowercase )}" )
lowercase_ : Dict = image.to(device=_lowercase , dtype=_lowercase )
lowercase_ : Dict = batch_size * num_images_per_prompt
if image.shape[1] == 4:
lowercase_ : str = image
else:
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) != batch_size:
raise ValueError(
f"You have passed a list of generators of length {len(_lowercase )}, but requested an effective batch"
f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
elif isinstance(_lowercase , _lowercase ):
lowercase_ : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_lowercase )
]
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
else:
lowercase_ : Union[str, Any] = self.movq.encode(_lowercase ).latent_dist.sample(_lowercase )
lowercase_ : str = self.movq.config.scaling_factor * init_latents
lowercase_ : int = torch.cat([init_latents] , dim=0 )
lowercase_ : Dict = init_latents.shape
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
# get latents
lowercase_ : List[str] = self.scheduler.add_noise(_lowercase , _lowercase , _lowercase )
lowercase_ : Optional[Any] = init_latents
return latents
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
lowercase_ : Optional[Any] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def lowerCamelCase__ ( self , _lowercase=0 ) -> int:
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
lowercase_ : List[Any] = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase_ : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase_ , lowercase_ : Dict = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase_ : List[str] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCamelCase__ ( self ) -> List[str]:
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self , _lowercase , _lowercase , _lowercase , _lowercase = 512 , _lowercase = 512 , _lowercase = 100 , _lowercase = 4.0 , _lowercase = 0.3 , _lowercase = 1 , _lowercase = None , _lowercase = "pil" , _lowercase = True , ) -> str:
lowercase_ : List[Any] = self._execution_device
lowercase_ : List[Any] = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = torch.cat(_lowercase , dim=0 )
lowercase_ : Optional[Any] = image_embeds.shape[0]
if isinstance(_lowercase , _lowercase ):
lowercase_ : List[str] = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase_ : List[str] = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase_ : Dict = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
if not isinstance(_lowercase , _lowercase ):
lowercase_ : Union[str, Any] = [image]
if not all(isinstance(_lowercase , (PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
f"Input is in incorrect format: {[type(_lowercase ) for i in image]}. Currently, we only support PIL image and pytorch tensor" )
lowercase_ : List[Any] = torch.cat([prepare_image(_lowercase , _lowercase , _lowercase ) for i in image] , dim=0 )
lowercase_ : Dict = image.to(dtype=image_embeds.dtype , device=_lowercase )
lowercase_ : Dict = self.movq.encode(_lowercase )['latents']
lowercase_ : Optional[Any] = latents.repeat_interleave(_lowercase , dim=0 )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase_ , lowercase_ : str = self.get_timesteps(_lowercase , _lowercase , _lowercase )
lowercase_ : int = timesteps[:1].repeat(batch_size * num_images_per_prompt )
lowercase_ , lowercase_ : Union[str, Any] = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
lowercase_ : List[str] = self.prepare_latents(
_lowercase , _lowercase , _lowercase , _lowercase , image_embeds.dtype , _lowercase , _lowercase )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase_ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase_ : str = {'image_embeds': image_embeds}
lowercase_ : str = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase_ , lowercase_ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
lowercase_ , lowercase_ : Optional[int] = noise_pred.chunk(2 )
lowercase_ , lowercase_ : Tuple = variance_pred.chunk(2 )
lowercase_ : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase_ : Tuple = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase_ , lowercase_ : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase_ : Dict = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase_ : Any = self.movq.decode(_lowercase , force_not_quantize=_lowercase )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
lowercase_ : Dict = image * 0.5 + 0.5
lowercase_ : Dict = image.clamp(0 , 1 )
lowercase_ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase_ : int = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
from collections.abc import Callable

import numpy as np


def heun_method(
    ode_func: Callable, ya: float, xa: float, step_size: float, x_end: float
) -> np.ndarray:
    """
    Integrate y' = ode_func(x, y) from x = xa to x = x_end with Heun's method
    (an Euler predictor followed by a trapezoidal corrector), starting from
    the initial value y(xa) = ya.
    """
    n = int(np.ceil((x_end - xa) / step_size))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )  # trapezoidal corrector
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
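    # Hedged usage sketch: integrate y' = y on [0, 1]; Heun's method is
    # second-order, so the endpoint should sit very close to e. The tolerance
    # below is illustrative, not a documented guarantee.
    ys = heun_method(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
    assert abs(ys[-1] - 2.718281828) < 1e-5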
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
"""simple docstring"""
def __init__( self :Union[str, Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str]=2 , lowerCamelCase__ :List[str]=3 , lowerCamelCase__ :List[str]=4 , lowerCamelCase__ :str=2 , lowerCamelCase__ :Optional[int]=7 , lowerCamelCase__ :List[Any]=True , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Union[str, Any]=True , lowerCamelCase__ :Any=True , lowerCamelCase__ :Dict=99 , lowerCamelCase__ :Optional[Any]=36 , lowerCamelCase__ :str=2 , lowerCamelCase__ :List[Any]=4 , lowerCamelCase__ :Optional[Any]=37 , lowerCamelCase__ :Optional[int]="gelu" , lowerCamelCase__ :Any=0.1 , lowerCamelCase__ :List[Any]=0.1 , lowerCamelCase__ :List[Any]=5_12 , lowerCamelCase__ :str=16 , lowerCamelCase__ :Tuple=2 , lowerCamelCase__ :int=0.02 , lowerCamelCase__ :List[Any]=6 , lowerCamelCase__ :List[str]=6 , lowerCamelCase__ :Optional[int]=3 , lowerCamelCase__ :Optional[int]=4 , lowerCamelCase__ :int=None , lowerCamelCase__ :Optional[Any]=10_00 , ):
UpperCamelCase__ :Any = parent
UpperCamelCase__ :Union[str, Any] = batch_size
UpperCamelCase__ :Dict = num_channels
UpperCamelCase__ :Optional[Any] = image_size
UpperCamelCase__ :Union[str, Any] = patch_size
UpperCamelCase__ :Union[str, Any] = is_training
UpperCamelCase__ :str = use_input_mask
UpperCamelCase__ :int = use_token_type_ids
UpperCamelCase__ :int = use_labels
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :List[str] = hidden_size
UpperCamelCase__ :List[Any] = num_hidden_layers
UpperCamelCase__ :List[str] = num_attention_heads
UpperCamelCase__ :Tuple = intermediate_size
UpperCamelCase__ :Any = hidden_act
UpperCamelCase__ :Optional[int] = hidden_dropout_prob
UpperCamelCase__ :Tuple = attention_probs_dropout_prob
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :Tuple = type_vocab_size
UpperCamelCase__ :Union[str, Any] = type_sequence_label_size
UpperCamelCase__ :int = initializer_range
UpperCamelCase__ :List[Any] = coordinate_size
UpperCamelCase__ :Tuple = shape_size
UpperCamelCase__ :Dict = num_labels
UpperCamelCase__ :str = num_choices
UpperCamelCase__ :Tuple = scope
UpperCamelCase__ :str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCamelCase__ :List[str] = text_seq_length
UpperCamelCase__ :List[str] = (image_size // patch_size) ** 2 + 1
UpperCamelCase__ :Dict = self.text_seq_length + self.image_seq_length
def __a ( self :Tuple ):
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCamelCase__ :int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCamelCase__ :str = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCamelCase__ :List[str] = bbox[i, j, 3]
UpperCamelCase__ :Optional[int] = bbox[i, j, 1]
UpperCamelCase__ :Optional[Any] = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCamelCase__ :Tuple = bbox[i, j, 2]
UpperCamelCase__ :Optional[Any] = bbox[i, j, 0]
UpperCamelCase__ :List[str] = tmp_coordinate
UpperCamelCase__ :Dict = tf.constant(lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ :Any = None
if self.use_input_mask:
UpperCamelCase__ :int = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCamelCase__ :Optional[Any] = None
if self.use_token_type_ids:
UpperCamelCase__ :Optional[int] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCamelCase__ :List[str] = None
UpperCamelCase__ :Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ :Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCamelCase__ :Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def __a ( self :List[Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :str , lowerCamelCase__ :int , lowerCamelCase__ :Any ):
UpperCamelCase__ :Dict = TFLayoutLMvaModel(config=lowerCamelCase__ )
# text + image
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , training=lowerCamelCase__ , )
UpperCamelCase__ :str = model(lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCamelCase__ :Optional[int] = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCamelCase__ :Tuple = model({"""pixel_values""": pixel_values} , training=lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :str , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :str ):
UpperCamelCase__ :Optional[Any] = self.num_labels
UpperCamelCase__ :List[Any] = TFLayoutLMvaForSequenceClassification(config=lowerCamelCase__ )
UpperCamelCase__ :List[str] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __a ( self :List[str] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Dict , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Tuple , lowerCamelCase__ :List[str] ):
UpperCamelCase__ :Union[str, Any] = self.num_labels
UpperCamelCase__ :Dict = TFLayoutLMvaForTokenClassification(config=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def __a ( self :int , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Dict , lowerCamelCase__ :Tuple , lowerCamelCase__ :Tuple ):
UpperCamelCase__ :Dict = 2
UpperCamelCase__ :Tuple = TFLayoutLMvaForQuestionAnswering(config=lowerCamelCase__ )
UpperCamelCase__ :int = model(
lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , training=lowerCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self :List[Any] ):
UpperCamelCase__ :Union[str, Any] = self.prepare_config_and_inputs()
((UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__) , (UpperCamelCase__)) :Any = config_and_inputs
UpperCamelCase__ :List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( lowercase , lowercase , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
_snake_case : Dict = (
{"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
if is_tf_available()
else {}
)
_snake_case : Optional[int] = False
_snake_case : List[str] = False
_snake_case : Tuple = False
def __a ( self :str , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple , lowerCamelCase__ :int ):
return True
def __a ( self :Optional[int] , lowerCamelCase__ :int , lowerCamelCase__ :List[str] , lowerCamelCase__ :Optional[int]=False ):
UpperCamelCase__ :List[str] = copy.deepcopy(lowerCamelCase__ )
if model_class in get_values(lowerCamelCase__ ):
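            # multiple-choice heads expect every tensor input tiled along a new
            # num_choices axis, so each choice sees a copy of the same features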
UpperCamelCase__ :Optional[int] = {
k: tf.tile(tf.expand_dims(lowerCamelCase__ , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(lowerCamelCase__ , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :str = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :List[str] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCamelCase__ :Union[str, Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(lowerCamelCase__ ):
UpperCamelCase__ :Tuple = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def __a ( self :Dict ):
UpperCamelCase__ :List[Any] = TFLayoutLMvaModelTester(self )
UpperCamelCase__ :Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def __a ( self :Any ):
self.config_tester.run_common_tests()
def __a ( self :Optional[int] ):
UpperCamelCase__ , UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ :Optional[int] = model_class(lowerCamelCase__ )
if getattr(lowerCamelCase__ , """hf_compute_loss""" , lowerCamelCase__ ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :int = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=lowerCamelCase__ )[0]
]
UpperCamelCase__ :Union[str, Any] = added_label.shape.as_list()[:1]
                # Test that the model correctly computes the loss with kwargs
UpperCamelCase__ :List[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
UpperCamelCase__ :List[str] = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that the model correctly computes the loss when we mask some positions
UpperCamelCase__ :Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Optional[Any] = prepared_for_class.pop("""input_ids""" )
if "labels" in prepared_for_class:
UpperCamelCase__ :List[str] = prepared_for_class["""labels"""].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCamelCase__ :Optional[Any] = -1_00
UpperCamelCase__ :Union[str, Any] = tf.convert_to_tensor(lowerCamelCase__ )
UpperCamelCase__ :Tuple = model(lowerCamelCase__ , **lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that the model correctly computes the loss with a dict
UpperCamelCase__ :Optional[Any] = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
UpperCamelCase__ :Union[str, Any] = model(lowerCamelCase__ )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that the model correctly computes the loss with a tuple
UpperCamelCase__ :Dict = self._prepare_for_class(inputs_dict.copy() , lowerCamelCase__ , return_labels=lowerCamelCase__ )
# Get keys that were added with the _prepare_for_class function
UpperCamelCase__ :str = prepared_for_class.keys() - inputs_dict.keys()
UpperCamelCase__ :Tuple = inspect.signature(model.call ).parameters
UpperCamelCase__ :str = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCamelCase__ :Any = {0: """input_ids"""}
for label_key in label_keys:
UpperCamelCase__ :Dict = signature_names.index(lowerCamelCase__ )
UpperCamelCase__ :Optional[int] = label_key
UpperCamelCase__ :Optional[Any] = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCamelCase__ :Any = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCamelCase__ :List[str] = prepared_for_class[value]
UpperCamelCase__ :Union[str, Any] = tuple(lowerCamelCase__ )
# Send to model
UpperCamelCase__ :str = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def __a ( self :Optional[int] ):
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Any ):
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCamelCase__ :Dict = type
self.model_tester.create_and_check_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Tuple ):
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :Optional[int] ):
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __a ( self :List[str] ):
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@slow
def __a ( self :Optional[int] ):
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Dict = TFLayoutLMvaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def A ( ) -> List[str]:
UpperCamelCase__ :List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __a ( self :Optional[Any] ):
return LayoutLMvaImageProcessor(apply_ocr=lowerCamelCase__ ) if is_vision_available() else None
@slow
def __a ( self :Dict ):
UpperCamelCase__ :List[str] = TFLayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" )
UpperCamelCase__ :List[Any] = self.default_image_processor
UpperCamelCase__ :str = prepare_img()
UpperCamelCase__ :Any = image_processor(images=lowerCamelCase__ , return_tensors="""tf""" ).pixel_values
UpperCamelCase__ :str = tf.constant([[1, 2]] )
UpperCamelCase__ :Any = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCamelCase__ :Dict = model(input_ids=lowerCamelCase__ , bbox=lowerCamelCase__ , pixel_values=lowerCamelCase__ , training=lowerCamelCase__ )
        # verify the output shape and values
UpperCamelCase__ :int = (1, 1_99, 7_68)
self.assertEqual(outputs.last_hidden_state.shape , lowerCamelCase__ )
UpperCamelCase__ :List[Any] = tf.constant(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=1e-4 ) )
| 45 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'CarlCochet/trajectory-transformer-halfcheetah-medium-v2': (
'https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : Optional[int] = "trajectory_transformer"
a : Dict = ["past_key_values"]
a : Optional[int] = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self, __magic_name__=100, __magic_name__=5, __magic_name__=1, __magic_name__=1, __magic_name__=249, __magic_name__=6, __magic_name__=17, __magic_name__=25, __magic_name__=4, __magic_name__=4, __magic_name__=128, __magic_name__=0.1, __magic_name__=0.1, __magic_name__=0.1, __magic_name__=0.0006, __magic_name__=512, __magic_name__=0.02, __magic_name__=1E-12, __magic_name__=1, __magic_name__=True, __magic_name__=1, __magic_name__=50256, __magic_name__=50256, **__magic_name__, ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : str = vocab_size
UpperCamelCase__ : int = action_weight
UpperCamelCase__ : Optional[int] = reward_weight
UpperCamelCase__ : Tuple = value_weight
UpperCamelCase__ : str = max_position_embeddings
UpperCamelCase__ : int = block_size
UpperCamelCase__ : Dict = action_dim
UpperCamelCase__ : Tuple = observation_dim
UpperCamelCase__ : str = transition_dim
UpperCamelCase__ : Any = learning_rate
UpperCamelCase__ : str = n_layer
UpperCamelCase__ : Union[str, Any] = n_head
UpperCamelCase__ : List[Any] = n_embd
UpperCamelCase__ : str = embd_pdrop
UpperCamelCase__ : Optional[int] = attn_pdrop
UpperCamelCase__ : Any = resid_pdrop
UpperCamelCase__ : Dict = initializer_range
UpperCamelCase__ : Optional[Any] = layer_norm_eps
UpperCamelCase__ : Dict = kaiming_initializer_range
UpperCamelCase__ : Union[str, Any] = use_cache
super().__init__(pad_token_id=__magic_name__, bos_token_id=__magic_name__, eos_token_id=__magic_name__, **__magic_name__ )
| 369 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ = logging.get_logger(__name__)
def lowerCAmelCase_ ( __UpperCAmelCase: List[Any] , __UpperCAmelCase: List[str]=False , __UpperCAmelCase: List[Any]=False , __UpperCAmelCase: int=False ) -> Optional[Any]:
UpperCamelCase__ : Union[str, Any] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
('''text_embeddings.word_embeddings.weight''', '''vilt.embeddings.text_embeddings.word_embeddings.weight'''),
(
'''text_embeddings.position_embeddings.weight''',
'''vilt.embeddings.text_embeddings.position_embeddings.weight''',
),
('''text_embeddings.position_ids''', '''vilt.embeddings.text_embeddings.position_ids'''),
(
'''text_embeddings.token_type_embeddings.weight''',
'''vilt.embeddings.text_embeddings.token_type_embeddings.weight''',
),
('''text_embeddings.LayerNorm.weight''', '''vilt.embeddings.text_embeddings.LayerNorm.weight'''),
('''text_embeddings.LayerNorm.bias''', '''vilt.embeddings.text_embeddings.LayerNorm.bias'''),
# patch embeddings
('''transformer.cls_token''', '''vilt.embeddings.cls_token'''),
('''transformer.patch_embed.proj.weight''', '''vilt.embeddings.patch_embeddings.projection.weight'''),
('''transformer.patch_embed.proj.bias''', '''vilt.embeddings.patch_embeddings.projection.bias'''),
('''transformer.pos_embed''', '''vilt.embeddings.position_embeddings'''),
# token type embeddings
('''token_type_embeddings.weight''', '''vilt.embeddings.token_type_embeddings.weight'''),
] )
# final layernorm + pooler
rename_keys.extend(
[
('''transformer.norm.weight''', '''vilt.layernorm.weight'''),
('''transformer.norm.bias''', '''vilt.layernorm.bias'''),
('''pooler.dense.weight''', '''vilt.pooler.dense.weight'''),
('''pooler.dense.bias''', '''vilt.pooler.dense.bias'''),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
('''vqa_classifier.0.weight''', '''classifier.0.weight'''),
('''vqa_classifier.0.bias''', '''classifier.0.bias'''),
('''vqa_classifier.1.weight''', '''classifier.1.weight'''),
('''vqa_classifier.1.bias''', '''classifier.1.bias'''),
('''vqa_classifier.3.weight''', '''classifier.3.weight'''),
('''vqa_classifier.3.bias''', '''classifier.3.bias'''),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
('''nlvr2_classifier.0.weight''', '''classifier.0.weight'''),
('''nlvr2_classifier.0.bias''', '''classifier.0.bias'''),
('''nlvr2_classifier.1.weight''', '''classifier.1.weight'''),
('''nlvr2_classifier.1.bias''', '''classifier.1.bias'''),
('''nlvr2_classifier.3.weight''', '''classifier.3.weight'''),
('''nlvr2_classifier.3.bias''', '''classifier.3.bias'''),
] )
else:
pass
return rename_keys
def lowerCAmelCase_ ( __UpperCAmelCase: List[Any] , __UpperCAmelCase: List[Any] ) -> Optional[int]:
for i in range(config.num_hidden_layers ):
UpperCamelCase__ : Tuple = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
UpperCamelCase__ : Dict = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.weight" )
UpperCamelCase__ : Tuple = state_dict.pop(f"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
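        # in the converted checkpoint these slices become, in order, the query,
        # key and value projection weights/biases of
        # vilt.encoder.layer.{i}.attention.attention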
UpperCamelCase__ : Union[str, Any] = in_proj_weight[
: config.hidden_size, :
]
UpperCamelCase__ : Optional[Any] = in_proj_bias[: config.hidden_size]
UpperCamelCase__ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
UpperCamelCase__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
UpperCamelCase__ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
UpperCamelCase__ : List[Any] = in_proj_bias[-config.hidden_size :]
def lowerCAmelCase_ ( __UpperCAmelCase: List[Any] ) -> Any:
UpperCamelCase__ : int = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase_ ( __UpperCAmelCase: Any , __UpperCAmelCase: Dict , __UpperCAmelCase: int ) -> List[str]:
UpperCamelCase__ : str = dct.pop(__UpperCAmelCase )
UpperCamelCase__ : Optional[int] = val
@torch.no_grad()
def lowerCAmelCase_ ( __UpperCAmelCase: List[Any] , __UpperCAmelCase: Union[str, Any] ) -> int:
UpperCamelCase__ : Any = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=__UpperCAmelCase )
UpperCamelCase__ : Any = False
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : List[Any] = False
UpperCamelCase__ : int = False
if "vqa" in checkpoint_url:
UpperCamelCase__ : Any = True
UpperCamelCase__ : Optional[int] = 3129
UpperCamelCase__ : Dict = '''huggingface/label-files'''
UpperCamelCase__ : Optional[Any] = '''vqa2-id2label.json'''
UpperCamelCase__ : Optional[int] = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type='''dataset''' ) , '''r''' ) )
UpperCamelCase__ : List[str] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
UpperCamelCase__ : str = idalabel
UpperCamelCase__ : List[Any] = {v: k for k, v in idalabel.items()}
UpperCamelCase__ : Tuple = ViltForQuestionAnswering(__UpperCAmelCase )
elif "nlvr" in checkpoint_url:
UpperCamelCase__ : Optional[Any] = True
UpperCamelCase__ : Union[str, Any] = 2
UpperCamelCase__ : int = {0: '''False''', 1: '''True'''}
UpperCamelCase__ : Optional[Any] = {v: k for k, v in config.idalabel.items()}
UpperCamelCase__ : Tuple = 3
UpperCamelCase__ : Optional[Any] = ViltForImagesAndTextClassification(__UpperCAmelCase )
elif "irtr" in checkpoint_url:
UpperCamelCase__ : List[str] = True
UpperCamelCase__ : Any = ViltForImageAndTextRetrieval(__UpperCAmelCase )
elif "mlm_itm" in checkpoint_url:
UpperCamelCase__ : Any = True
UpperCamelCase__ : int = ViltForMaskedLM(__UpperCAmelCase )
else:
raise ValueError('''Unknown model type''' )
# load state_dict of original model, remove and rename some keys
UpperCamelCase__ : int = torch.hub.load_state_dict_from_url(__UpperCAmelCase , map_location='''cpu''' )['''state_dict''']
UpperCamelCase__ : Dict = create_rename_keys(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
read_in_q_k_v(__UpperCAmelCase , __UpperCAmelCase )
if mlm_model or irtr_model:
UpperCamelCase__ : str = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
for k in ignore_keys:
state_dict.pop(__UpperCAmelCase , __UpperCAmelCase )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
UpperCamelCase__ ,UpperCamelCase__ : Optional[Any] = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(__UpperCAmelCase )
# Define processor
UpperCamelCase__ : Union[str, Any] = ViltImageProcessor(size=384 )
UpperCamelCase__ : List[str] = BertTokenizer.from_pretrained('''bert-base-uncased''' )
UpperCamelCase__ : Optional[int] = ViltProcessor(__UpperCAmelCase , __UpperCAmelCase )
# Forward pass on example inputs (image + text)
if nlvr_model:
UpperCamelCase__ : Optional[int] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__UpperCAmelCase ).raw )
UpperCamelCase__ : Union[str, Any] = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=__UpperCAmelCase ).raw )
UpperCamelCase__ : Dict = (
'''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
''' standing.'''
)
UpperCamelCase__ : str = processor(__UpperCAmelCase , __UpperCAmelCase , return_tensors='''pt''' )
UpperCamelCase__ : Optional[Any] = processor(__UpperCAmelCase , __UpperCAmelCase , return_tensors='''pt''' )
UpperCamelCase__ : Union[str, Any] = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
UpperCamelCase__ : Union[str, Any] = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=__UpperCAmelCase ).raw )
if mlm_model:
UpperCamelCase__ : int = '''a bunch of [MASK] laying on a [MASK].'''
else:
UpperCamelCase__ : Optional[Any] = '''How many cats are there?'''
UpperCamelCase__ : List[Any] = processor(__UpperCAmelCase , __UpperCAmelCase , return_tensors='''pt''' )
UpperCamelCase__ : Union[str, Any] = model(**__UpperCAmelCase )
# Verify outputs
if mlm_model:
UpperCamelCase__ : str = torch.Size([1, 11, 3_0522] )
UpperCamelCase__ : Optional[Any] = torch.tensor([-12.5061, -12.5123, -12.5174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , __UpperCAmelCase , atol=1e-4 )
# verify masked token prediction equals "cats"
UpperCamelCase__ : Optional[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
UpperCamelCase__ : List[Any] = torch.Size([1, 3129] )
UpperCamelCase__ : Tuple = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 )
# verify vqa prediction equals "2"
UpperCamelCase__ : Any = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
UpperCamelCase__ : Dict = torch.Size([1, 2] )
UpperCamelCase__ : str = torch.tensor([-2.8721, 2.1291] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 )
Path(__UpperCAmelCase ).mkdir(exist_ok=__UpperCAmelCase )
print(f"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCAmelCase_ = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 369 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
A__ : Optional[Any] = logging.get_logger(__name__)
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] ) -> List[Any]:
__lowerCamelCase : Optional[int] = UniSpeechSatForSequenceClassification.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
__lowerCamelCase : Union[str, Any] = downstream_dict['projector.weight']
__lowerCamelCase : List[str] = downstream_dict['projector.bias']
__lowerCamelCase : List[str] = downstream_dict['model.post_net.linear.weight']
__lowerCamelCase : List[Any] = downstream_dict['model.post_net.linear.bias']
return model
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ) -> List[str]:
__lowerCamelCase : Optional[Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
__lowerCamelCase : str = downstream_dict['model.linear.weight']
__lowerCamelCase : List[str] = downstream_dict['model.linear.bias']
return model
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str ) -> Optional[int]:
__lowerCamelCase : Any = UniSpeechSatForXVector.from_pretrained(UpperCAmelCase_ , config=UpperCAmelCase_ )
__lowerCamelCase : Dict = downstream_dict['connector.weight']
__lowerCamelCase : Dict = downstream_dict['connector.bias']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
__lowerCamelCase : Optional[int] = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
__lowerCamelCase : Optional[int] = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
__lowerCamelCase : Union[str, Any] = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
__lowerCamelCase : Tuple = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
__lowerCamelCase : Any = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
__lowerCamelCase : Tuple = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
__lowerCamelCase : Dict = downstream_dict['objective.W']
return model
@torch.no_grad()
def UpperCAmelCase__ ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] ) -> Dict:
__lowerCamelCase : str = torch.load(UpperCAmelCase_ , map_location='cpu' )
__lowerCamelCase : Optional[int] = checkpoint['Downstream']
__lowerCamelCase : str = UniSpeechSatConfig.from_pretrained(UpperCAmelCase_ )
__lowerCamelCase : str = WavaVecaFeatureExtractor.from_pretrained(
UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , do_normalize=UpperCAmelCase_ )
__lowerCamelCase : Optional[Any] = hf_config.architectures[0]
if arch.endswith('ForSequenceClassification' ):
__lowerCamelCase : Optional[Any] = convert_classification(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif arch.endswith('ForAudioFrameClassification' ):
__lowerCamelCase : str = convert_diarization(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
elif arch.endswith('ForXVector' ):
__lowerCamelCase : Any = convert_xvector(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
__lowerCamelCase : Optional[Any] = checkpoint['Featurizer']['weights']
hf_feature_extractor.save_pretrained(UpperCAmelCase_ )
hf_model.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
A__ : Optional[int] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 13 |
from manim import *
class a__ ( __snake_case ):
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
__a = Rectangle(height=0.5 , width=0.5 )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__a = Rectangle(height=0.25 , width=0.25 )
__a = [mem.copy() for i in range(6 )]
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('CPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(4 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('GPU' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(UpperCAmelCase )
__a = [mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Model' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(UpperCAmelCase )
__a = []
__a = []
for i, rect in enumerate(UpperCAmelCase ):
__a = fill.copy().set_fill(UpperCAmelCase , opacity=0.8 )
target.move_to(UpperCAmelCase )
model_arr.append(UpperCAmelCase )
__a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(UpperCAmelCase )
self.add(*UpperCAmelCase , *UpperCAmelCase )
__a = [meta_mem.copy() for i in range(6 )]
__a = [meta_mem.copy() for i in range(6 )]
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
__a = Text('Disk' , font_size=2_4 )
__a = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
disk.move_to([-4, -1.25, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
__a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=1_8 , )
key_text.move_to([-5, 2.4, 0] )
self.add(UpperCAmelCase , UpperCAmelCase )
__a = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=1_8 , )
blue_text.next_to(UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(UpperCAmelCase )
__a = MarkupText(
f'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase ) )
__a = Square(0.3 )
input.set_fill(UpperCAmelCase , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , UpperCAmelCase , buff=0.5 )
self.play(Write(UpperCAmelCase ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=UpperCAmelCase , buff=0.02 )
self.play(MoveToTarget(UpperCAmelCase ) )
self.play(FadeOut(UpperCAmelCase ) )
__a = Arrow(start=UpperCAmelCase , end=UpperCAmelCase , color=UpperCAmelCase , buff=0.5 )
a.next_to(model_arr[0].get_left() , UpperCAmelCase , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
__a = MarkupText(
f'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=2_4 , )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) )
__a = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
self.play(
Write(UpperCAmelCase ) , Circumscribe(model_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_cpu_arr[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
__a = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , UpperCAmelCase , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
__a = AnimationGroup(
FadeOut(UpperCAmelCase , run_time=0.5 ) , MoveToTarget(UpperCAmelCase , run_time=0.5 ) , FadeIn(UpperCAmelCase , run_time=0.5 ) , lag_ratio=0.2 )
self.play(UpperCAmelCase )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
__a = 0.7
self.play(
Circumscribe(model_arr[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i] , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(model_arr[i + 1] , color=UpperCAmelCase , **UpperCAmelCase ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(cpu_left_col_base[-1] , color=UpperCAmelCase , **UpperCAmelCase ) , Circumscribe(gpu_rect[0] , color=UpperCAmelCase , **UpperCAmelCase ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
__a = a_c
__a = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(UpperCAmelCase ) , FadeOut(UpperCAmelCase , run_time=0.5 ) , )
__a = MarkupText(f'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=2_4 )
step_a.move_to([2, 2, 0] )
self.play(Write(UpperCAmelCase , run_time=3 ) , MoveToTarget(UpperCAmelCase ) )
self.wait()
| 559 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCAmelCase = {
'''configuration_blenderbot_small''': [
'''BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotSmallConfig''',
'''BlenderbotSmallOnnxConfig''',
],
'''tokenization_blenderbot_small''': ['''BlenderbotSmallTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = ['''BlenderbotSmallTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'''BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotSmallForCausalLM''',
'''BlenderbotSmallForConditionalGeneration''',
'''BlenderbotSmallModel''',
'''BlenderbotSmallPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'''TFBlenderbotSmallForConditionalGeneration''',
'''TFBlenderbotSmallModel''',
'''TFBlenderbotSmallPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase = [
'''FlaxBlenderbotSmallForConditionalGeneration''',
'''FlaxBlenderbotSmallModel''',
'''FlaxBlenderbotSmallPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
_UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 706 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase ( lowercase_ : Any , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
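    # LayoutLM-style models expect bounding boxes on a normalized 0-1000 grid,
    # hence the scaling by 1000 relative to the page width/height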
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def UpperCamelCase ( lowercase_ : np.ndarray , lowercase_ : Optional[str] , lowercase_ : Optional[str] = None ) -> Tuple:
'''simple docstring'''
lowercase =tesseract_config if tesseract_config is not None else ''''''
# apply OCR
lowercase =to_pil_image(lowercase_ )
lowercase , lowercase =pil_image.size
lowercase =pytesseract.image_to_data(lowercase_ , lang=lowercase_ , output_type='''dict''' , config=lowercase_ )
lowercase , lowercase , lowercase , lowercase , lowercase =data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
lowercase =[idx for idx, word in enumerate(lowercase_ ) if not word.strip()]
lowercase =[word for idx, word in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
lowercase =[coord for idx, coord in enumerate(lowercase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase =[]
for x, y, w, h in zip(lowercase_ , lowercase_ , lowercase_ , lowercase_ ):
lowercase =[x, y, x + w, y + h]
actual_boxes.append(lowercase_ )
# finally, normalize the bounding boxes
lowercase =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowercase_ , lowercase_ , lowercase_ ) )
assert len(lowercase_ ) == len(lowercase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = ['pixel_values']
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = True , snake_case_ = None , snake_case_ = "" , **snake_case_ , ):
super().__init__(**snake_case_ )
lowercase =size if size is not None else {'''height''': 2_24, '''width''': 2_24}
lowercase =get_size_dict(snake_case_ )
lowercase =do_resize
lowercase =size
lowercase =resample
lowercase =apply_ocr
lowercase =ocr_lang
lowercase =tesseract_config
def _A( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BILINEAR , snake_case_ = None , **snake_case_ , ):
lowercase =get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase =(size['''height'''], size['''width'''])
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def _A( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ):
lowercase =do_resize if do_resize is not None else self.do_resize
lowercase =size if size is not None else self.size
lowercase =get_size_dict(snake_case_ )
lowercase =resample if resample is not None else self.resample
lowercase =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase =make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
lowercase =[to_numpy_array(snake_case_ ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase =[]
lowercase =[]
for image in images:
lowercase , lowercase =apply_tesseract(snake_case_ , snake_case_ , snake_case_ )
words_batch.append(snake_case_ )
boxes_batch.append(snake_case_ )
if do_resize:
lowercase =[self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
lowercase =[flip_channel_order(snake_case_ ) for image in images]
lowercase =[to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
lowercase =BatchFeature(data={'''pixel_values''': images} , tensor_type=snake_case_ )
if apply_ocr:
lowercase =words_batch
lowercase =boxes_batch
return data
| 145 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
SCREAMING_SNAKE_CASE_: Tuple = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE_: Tuple = 'BlipImageProcessor'
SCREAMING_SNAKE_CASE_: Union[str, Any] = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
_SCREAMING_SNAKE_CASE : List[str] = False
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : List[Any] = self.image_processor
def __call__( self , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = True , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = 0 , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = True , lowerCAmelCase_ = None , **lowerCAmelCase_ , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
_SCREAMING_SNAKE_CASE : str = self.tokenizer
_SCREAMING_SNAKE_CASE : str = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
return text_encoding
# add pixel_values
_SCREAMING_SNAKE_CASE : int = self.image_processor(lowerCAmelCase_ , return_tensors=lowerCAmelCase_ )
if text is not None:
_SCREAMING_SNAKE_CASE : int = self.tokenizer(
text=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ , stride=lowerCAmelCase_ , pad_to_multiple_of=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_overflowing_tokens=lowerCAmelCase_ , return_special_tokens_mask=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , return_length=lowerCAmelCase_ , verbose=lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , **lowerCAmelCase_ , )
else:
_SCREAMING_SNAKE_CASE : Tuple = None
if text_encoding is not None:
encoding_image_processor.update(lowerCAmelCase_ )
return encoding_image_processor
def A ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Dict:
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def A ( self , *lowerCAmelCase_ , **lowerCAmelCase_ ) -> Any:
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@property
def A ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 621 |
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowercase__ ( lowerCamelCase ):
return EnvironmentCommand()
class _lowerCAmelCase ( __UpperCAmelCase ):
@staticmethod
def A ( lowerCAmelCase_ ) -> List[Any]:
_SCREAMING_SNAKE_CASE : int = parser.add_parser('env' )
download_parser.set_defaults(func=lowerCAmelCase_ )
def A ( self ) -> Any:
_SCREAMING_SNAKE_CASE : int = huggingface_hub.__version__
_SCREAMING_SNAKE_CASE : Optional[Any] = 'not installed'
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'NA'
if is_torch_available():
import torch
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.__version__
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cuda.is_available()
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'not installed'
if is_transformers_available():
import transformers
_SCREAMING_SNAKE_CASE : Tuple = transformers.__version__
_SCREAMING_SNAKE_CASE : Optional[int] = 'not installed'
if is_accelerate_available():
import accelerate
_SCREAMING_SNAKE_CASE : str = accelerate.__version__
_SCREAMING_SNAKE_CASE : str = 'not installed'
if is_xformers_available():
import xformers
_SCREAMING_SNAKE_CASE : Optional[int] = xformers.__version__
_SCREAMING_SNAKE_CASE : Any = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(lowerCAmelCase_ ) )
return info
@staticmethod
def A ( lowerCAmelCase_ ) -> int:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 621 | 1 |
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
snake_case = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
snake_case = [0, 25, 50]
snake_case = [25, 50, 75]
snake_case = fuzz.membership.trimf(X, abca)
snake_case = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
snake_case = np.ones(75)
snake_case = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
snake_case = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
snake_case = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
snake_case = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
snake_case = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
snake_case = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
snake_case = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
snake_case = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, µA(x) - µB(x)]
snake_case = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
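    # The two composition comments above have no implementation in this script.
    # A minimal illustrative sketch follows; R, S and their values are
    # assumptions for demonstration, not part of the original example. For
    # fuzzy relations R (n x m) and S (m x p), max-min composition is
    # T[i, j] = max over k of min(R[i, k], S[k, j]); max-product composition
    # replaces the min with an ordinary product.
    R = np.array([[0.6, 0.3], [0.2, 0.9]])  # hypothetical 2x2 fuzzy relation
    S = np.array([[1.0, 0.5, 0.3], [0.8, 0.4, 0.7]])  # hypothetical 2x3 fuzzy relation
    # broadcast to shape (n, m, p), combine along the shared middle axis
    max_min_composition = np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)
    max_product_composition = np.max(R[:, :, None] * S[None, :, :], axis=1)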
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title("""Young""")
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title("""Middle aged""")
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title("""union""")
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title("""intersection""")
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title("""complement_a""")
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title("""difference a/b""")
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title("""alg_sum""")
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title("""alg_product""")
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title("""bdd_sum""")
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title("""bdd_difference""")
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 721 |
'''simple docstring'''
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ):
A_ : Union[str, Any] = OpenAIGPTTokenizer
A_ : Optional[int] = OpenAIGPTTokenizerFast
A_ : Optional[int] = True
A_ : Any = False
def _A ( self : Dict ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ : int = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCAmelCase__ : List[Any] = dict(zip(a__ , range(len(a__ ) ) ) )
lowerCAmelCase__ : Tuple = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
lowerCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(a__ ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(a__ ) )
def _A ( self : Union[str, Any] , a__ : str ):
'''simple docstring'''
return "lower newer", "lower newer"
def _A ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase__ : Union[str, Any] = "lower"
lowerCAmelCase__ : List[Any] = ["low", "er</w>"]
lowerCAmelCase__ : Dict = tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
lowerCAmelCase__ : Tuple = tokens + ["<unk>"]
lowerCAmelCase__ : Tuple = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , a__ )
def _A ( self : Union[str, Any] , a__ : List[str]=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowerCAmelCase__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(a__ , **a__ )
# Simple input
lowerCAmelCase__ : Tuple = "This is a simple input"
lowerCAmelCase__ : Tuple = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase__ : int = ("This is a simple input", "This is a pair")
lowerCAmelCase__ : Union[str, Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="max_length" )
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="max_length" )
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="max_length" , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding="max_length" )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding="max_length" )
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding="max_length" , )
def _A ( self : Any ):
'''simple docstring'''
pass
@require_ftfy
@require_spacy
@require_tokenizers
class lowerCAmelCase ( UpperCamelCase_ ):
pass
| 568 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a_ ( __magic_name__ , __magic_name__=0.999 , __magic_name__="cosine" , ) -> List[str]:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(__magic_name__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(__magic_name__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"Unsupported alpha_tranform_type: {alpha_transform_type}" )
snake_case : Optional[int] = []
for i in range(a_ ):
snake_case : Dict = i / num_diffusion_timesteps
snake_case : List[Any] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a_ ) / alpha_bar_fn(a_ ) , a_ ) )
return torch.tensor(a_ , dtype=torch.floataa )
class a_ ( _A , _A ):
A__ : str = [e.name for e in KarrasDiffusionSchedulers]
A__ : Any = 2
@register_to_config
def __init__( self , num_train_timesteps: int = 1_000 , beta_start: float = 0.0_0085 , beta_end: float = 0.012 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , prediction_type: str = "epsilon" , use_karras_sigmas: Optional[bool] = False , clip_sample: Optional[bool] = False , clip_sample_range: float = 1.0 , timestep_spacing: str = "linspace" , steps_offset: int = 0 , ):
"""simple docstring"""
if trained_betas is not None:
self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
elif beta_schedule == "linear":
self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
self.betas = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(F"{beta_schedule} is not implemented for {self.__class__}" )
self.alphas = 1.0 - self.betas
self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
self.use_karras_sigmas = use_karras_sigmas
def index_for_timestep( self , timestep , schedule_timesteps=None ):
"""simple docstring"""
if schedule_timesteps is None:
schedule_timesteps = self.timesteps
indices = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
pos = 1 if len(indices ) > 1 else 0
else:
timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
pos = self._index_counter[timestep_int]
return indices[pos].item()
@property
def init_noise_sigma( self ):
"""simple docstring"""
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def scale_model_input( self , sample , timestep , ):
"""simple docstring"""
step_index = self.index_for_timestep(timestep )
sigma = self.sigmas[step_index]
sample = sample / ((sigma**2 + 1) ** 0.5)
return sample
def set_timesteps( self , num_inference_steps , device=None , num_train_timesteps=None , ):
"""simple docstring"""
self.num_inference_steps = num_inference_steps
num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
elif self.config.timestep_spacing == "leading":
step_ratio = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
step_ratio = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
timesteps -= 1
else:
raise ValueError(
F"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
log_sigmas = np.log(sigmas )
sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
if self.config.use_karras_sigmas:
sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )
sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
sigmas = torch.from_numpy(sigmas ).to(device=device )
self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
timesteps = torch.from_numpy(timesteps )
timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(device ).startswith('''mps''' ):
# mps does not support float64
self.timesteps = timesteps.to(device , dtype=torch.float32 )
else:
self.timesteps = timesteps.to(device=device )
# empty dt and derivative
self.prev_derivative = None
self.dt = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
self._index_counter = defaultdict(int )
def _sigma_to_t( self , sigma , log_sigmas ):
"""simple docstring"""
# get log sigma
log_sigma = np.log(sigma )
# get distribution
dists = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
high_idx = low_idx + 1
low = log_sigmas[low_idx]
high = log_sigmas[high_idx]
# interpolate sigmas
w = (low - log_sigma) / (low - high)
w = np.clip(w , 0 , 1 )
# transform interpolation to time range
t = (1 - w) * low_idx + w * high_idx
t = t.reshape(sigma.shape )
return t
def _convert_to_karras( self , in_sigmas , num_inference_steps ):
"""simple docstring"""
sigma_min: float = in_sigmas[-1].item()
sigma_max: float = in_sigmas[0].item()
rho = 7.0 # 7.0 is the value used in the paper
ramp = np.linspace(0 , 1 , num_inference_steps )
min_inv_rho = sigma_min ** (1 / rho)
max_inv_rho = sigma_max ** (1 / rho)
sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
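# Sketch of the Karras et al. (2022) schedule (added; example values assumed):
# the ramp is linear between sigma_max**(1/rho) and sigma_min**(1/rho) and is
# then raised back to the rho-th power, concentrating steps at low noise.
# With sigma_min=0.1, sigma_max=10 and 5 steps this yields roughly
# [10.0, 4.07, 1.45, 0.43, 0.1].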
@property
def state_in_first_order( self ):
"""simple docstring"""
return self.dt is None
def step( self , model_output , timestep , sample , return_dict: bool = True , ):
"""simple docstring"""
step_index = self.index_for_timestep(timestep )
# advance index counter by 1
timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
sigma = self.sigmas[step_index]
sigma_next = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
sigma = self.sigmas[step_index - 1]
sigma_next = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
gamma = 0
sigma_hat = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
sigma_input = sigma_hat if self.state_in_first_order else sigma_next
pred_original_sample = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
sigma_input = sigma_hat if self.state_in_first_order else sigma_next
pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
pred_original_sample = model_output
else:
raise ValueError(
F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`" )
if self.config.clip_sample:
pred_original_sample = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
derivative = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
dt = sigma_next - sigma_hat
# store for 2nd order step
self.prev_derivative = derivative
self.dt = dt
self.sample = sample
else:
# 2. 2nd order / Heun's method
derivative = (sample - pred_original_sample) / sigma_next
derivative = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
dt = self.dt
sample = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
self.prev_derivative = None
self.dt = None
self.sample = None
prev_sample = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=prev_sample )
def add_noise( self , original_samples , noise , timesteps , ):
"""simple docstring"""
# Make sure sigmas and timesteps have the same device and dtype as original_samples
sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
# mps does not support float64
schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
schedule_timesteps = self.timesteps.to(original_samples.device )
timesteps = timesteps.to(original_samples.device )
step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
sigma = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
sigma = sigma.unsqueeze(-1 )
noisy_samples = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int] ):
"""simple docstring"""
return self.config.num_train_timesteps
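# Usage sketch (added; `unet` and all shapes are hypothetical, for illustration):
#   scheduler = HeunDiscreteScheduler(beta_schedule="scaled_linear")
#   scheduler.set_timesteps(50, device="cpu")
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)  # hypothetical denoiser call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample
# Heun is a predictor-corrector method: `step` alternates between a first-order
# and a second-order phase, which is why `set_timesteps` repeat-interleaves the
# sigmas and timesteps.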
| 598 |
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prim_successful_result():
num_nodes, num_edges = 9, 1_4 # noqa: F841
edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
adjacency = defaultdict(list )
for node1, node2, cost in edges:
adjacency[node1].append([node2, cost] )
adjacency[node2].append([node1, cost] )
result = mst(adjacency )
expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
edge = tuple(answer[:2] )
reverse = tuple(edge[::-1] )
assert edge in result or reverse in result
| 525 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase ):
model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def test_small_model_pt(self ):
'''simple docstring'''
text_generator = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='pt' )
# Using `do_sample=False` to force deterministic output
outputs = text_generator('This is a test' , do_sample=False )
self.assertEqual(
outputs , [
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
] , )
outputs = text_generator(['This is a test', 'This is a second test'] )
self.assertEqual(
outputs , [
[
{
'generated_text': (
'This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'
' oscope. FiliFili@@'
)
}
],
[
{
'generated_text': (
'This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'
' oscope. oscope. FiliFili@@'
)
}
],
] , )
outputs = text_generator('This is a test' , do_sample=True , num_return_sequences=2 , return_tensors=True )
self.assertEqual(
outputs , [
{'generated_token_ids': ANY(list )},
{'generated_token_ids': ANY(list )},
] , )
text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
text_generator.tokenizer.pad_token = '<pad>'
outputs = text_generator(
['This is a test', 'This is a second test'] , do_sample=True , num_return_sequences=2 , batch_size=2 , return_tensors=True , )
self.assertEqual(
outputs , [
[
{'generated_token_ids': ANY(list )},
{'generated_token_ids': ANY(list )},
],
[
{'generated_token_ids': ANY(list )},
{'generated_token_ids': ANY(list )},
],
] , )
@require_tf
def test_small_model_tf(self ):
'''simple docstring'''
text_generator = pipeline(task='text-generation' , model='sshleifer/tiny-ctrl' , framework='tf' )
# Using `do_sample=False` to force deterministic output
outputs = text_generator('This is a test' , do_sample=False )
self.assertEqual(
outputs , [
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
] , )
outputs = text_generator(['This is a test', 'This is a second test'] , do_sample=False )
self.assertEqual(
outputs , [
[
{
'generated_text': (
'This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'
' please,'
)
}
],
[
{
'generated_text': (
'This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'
' Cannes 閲閲Cannes Cannes Cannes 攵 please,'
)
}
],
] , )
def get_test_pipeline(self , model , tokenizer , processor ):
'''simple docstring'''
text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
return text_generator, ["This is a test", "Another test"]
def test_stop_sequence_stopping_criteria(self ):
'''simple docstring'''
prompt = 'Hello I believe in'
text_generator = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
output = text_generator(prompt )
self.assertEqual(
output , [{'generated_text': 'Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'}] , )
output = text_generator(prompt , stop_sequence=' fe' )
self.assertEqual(output , [{'generated_text': 'Hello I believe in fe'}] )
def run_pipeline_test(self , text_generator , _ ):
'''simple docstring'''
model = text_generator.model
tokenizer = text_generator.tokenizer
outputs = text_generator('This is a test' )
self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
outputs = text_generator('This is a test' , return_full_text=False )
self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
text_generator = pipeline(task='text-generation' , model=model , tokenizer=tokenizer , return_full_text=False )
outputs = text_generator('This is a test' )
self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
self.assertNotIn('This is a test' , outputs[0]['generated_text'] )
outputs = text_generator('This is a test' , return_full_text=True )
self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
self.assertTrue(outputs[0]['generated_text'].startswith('This is a test' ) )
outputs = text_generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=True )
self.assertEqual(
outputs , [
[{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
[{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
] , )
if text_generator.tokenizer.pad_token is not None:
outputs = text_generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=True )
self.assertEqual(
outputs , [
[{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
[{'generated_text': ANY(str )}, {'generated_text': ANY(str )}],
] , )
with self.assertRaises(ValueError ):
outputs = text_generator('test' , return_full_text=True , return_text=True )
with self.assertRaises(ValueError ):
outputs = text_generator('test' , return_full_text=True , return_tensors=True )
with self.assertRaises(ValueError ):
outputs = text_generator('test' , return_text=True , return_tensors=True )
# Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
outputs = text_generator('' )
self.assertEqual(outputs , [{'generated_text': ANY(str )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
outputs = text_generator('' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ['RwkvForCausalLM', 'XGLMForCausalLM', 'GPTNeoXForCausalLM']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('This is a test' * 500 , max_new_tokens=20 )
outputs = text_generator('This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(ValueError ):
text_generator(
'This is a test' * 500 , handle_long_generation='hole' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def test_small_model_pt_bloom_accelerate(self ):
'''simple docstring'''
import torch
# Classic `model_kwargs`
pipe = pipeline(
model='hf-internal-testing/tiny-random-bloom' , model_kwargs={'device_map': 'auto', 'torch_dtype': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
outputs = pipe('This is a test' )
self.assertEqual(
outputs , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
pipe = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.bfloat16 )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
outputs = pipe('This is a test' )
self.assertEqual(
outputs , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
pipe = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
outputs = pipe('This is a test' )
self.assertEqual(
outputs , [
{
'generated_text': (
'This is a test test test test test test test test test test test test test test test test'
' test'
)
}
] , )
@require_torch
@require_torch_gpu
def test_small_model_fp16(self ):
'''simple docstring'''
import torch
pipe = pipeline(model='hf-internal-testing/tiny-random-bloom' , device=0 , torch_dtype=torch.float16 )
pipe('This is a test' )
@require_torch
@require_accelerate
@require_torch_gpu
def test_pipeline_accelerate_top_p(self ):
'''simple docstring'''
import torch
pipe = pipeline(model='hf-internal-testing/tiny-random-bloom' , device_map='auto' , torch_dtype=torch.float16 )
pipe('This is a test' , do_sample=True , top_p=0.5 )
def test_pipeline_length_setting_warning(self ):
'''simple docstring'''
prompt = 'Hello world'
text_generator = pipeline('text-generation' , model='hf-internal-testing/tiny-random-gpt2' )
if text_generator.model.framework == "tf":
logger = logging.get_logger('transformers.generation.tf_utils' )
else:
logger = logging.get_logger('transformers.generation.utils' )
logger_msg = 'Both `max_new_tokens`' # The beginning of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(logger ) as cl:
_ = text_generator(prompt , max_length=10 , max_new_tokens=1 )
self.assertIn(logger_msg , cl.out )
# The user only sets one -> no warning
with CaptureLogger(logger ) as cl:
_ = text_generator(prompt , max_new_tokens=1 )
self.assertNotIn(logger_msg , cl.out )
with CaptureLogger(logger ) as cl:
_ = text_generator(prompt , max_length=10 )
self.assertNotIn(logger_msg , cl.out )
| 712 |
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file , class_name , test_name , correct_line , done_test ) -> None:
_id = f'{file}_{class_name}_{test_name}'
done_test[_id] += 1
with open(file , 'r' ) as f:
lines = f.readlines()
class_regex = f'class {class_name}('
func_regex = f'{4 * " "}def {test_name}('
line_begin_regex = f'{8 * " "}{correct_line.split()[0]}'
another_line_begin_regex = f'{1_6 * " "}{correct_line.split()[0]}'
in_class = False
in_func = False
in_line = False
insert_line = False
count = 0
spaces = 0
new_lines = []
for line in lines:
if line.startswith(class_regex ):
in_class = True
elif in_class and line.startswith(func_regex ):
in_func = True
elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
spaces = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
in_line = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
insert_line = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'{spaces * " "}{correct_line}' )
in_class = in_func = in_line = insert_line = False
else:
new_lines.append(line )
with open(file , 'w' ) as f:
for line in new_lines:
f.write(line )
def main(correct , fail=None ) -> None:
if fail is not None:
with open(fail , 'r' ) as f:
test_failures = {l.strip() for l in f.readlines()}
else:
test_failures = None
with open(correct , 'r' ) as f:
correct_lines = f.readlines()
done_tests = defaultdict(int )
for line in correct_lines:
file, class_name, test_name, correct_line = line.split(';' )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(file , class_name , test_name , correct_line , done_tests )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
args = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 293 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""CLIPFeatureExtractor"""]
UpperCamelCase_ = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 92 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec('s3fs') is not None
if _has_s3fs:
from .s3filesystem import S3FileSystem # noqa: F401
COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
compression.Bz2FileSystem,
compression.GzipFileSystem,
compression.Lz4FileSystem,
compression.XzFileSystem,
compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path ):
'''simple docstring'''
if "://" in dataset_path:
dataset_path = dataset_path.split('''://''' )[1]
return dataset_path
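# e.g. extract_path_from_uri("s3://my-bucket/data/train") -> "my-bucket/data/train"
#      extract_path_from_uri("/local/path") -> "/local/path" (returned unchanged)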
def is_remote_filesystem(fs ):
'''simple docstring'''
if fs is not None and fs.protocol != "file":
return True
else:
return False
def rename(fs , src , dst ):
'''simple docstring'''
is_local = not is_remote_filesystem(fs )
if is_local:
# LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
else:
fs.mv(src , dst , recursive=True )
def _reset_fsspec_lock():
'''simple docstring'''
if hasattr(fsspec.asyn , '''reset_lock''' ):
# for future fsspec>2022.05.0
fsspec.asyn.reset_lock()
else:
fsspec.asyn.iothread[0] = None
fsspec.asyn.loop[0] = None
fsspec.asyn.lock = threading.Lock()
| 183 | 0 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float] ) -> np.ndarray:
"""simple docstring"""
return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 709 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class CTRLConfig(PretrainedConfig ):
model_type = "ctrl"
keys_to_ignore_at_inference = ["past_key_values"]
attribute_map = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , vocab_size=246_534 , n_positions=256 , n_embd=1_280 , dff=8_192 , n_layer=48 , n_head=16 , resid_pdrop=0.1 , embd_pdrop=0.1 , layer_norm_epsilon=1E-6 , initializer_range=0.02 , use_cache=True , **kwargs , ):
'''simple docstring'''
self.vocab_size = vocab_size
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.dff = dff
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.use_cache = use_cache
super().__init__(**kwargs )
| 569 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig ):
'''simple docstring'''
model_type = 'sew'
def __init__( self , vocab_size=3_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , squeeze_factor=2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , conv_kernel=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , conv_bias=False , num_conv_pos_embeddings=1_2_8 , num_conv_pos_embedding_groups=1_6 , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=1_0 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=1_0 , mask_feature_min_masks=0 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_5_6 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , **kwargs , ):
super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
self.hidden_size = hidden_size
self.feat_extract_norm = feat_extract_norm
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim )
self.conv_stride = list(conv_stride )
self.conv_kernel = list(conv_kernel )
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim )
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.squeeze_factor = squeeze_factor
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. "
"It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
f'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
f' = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# sequence classification
self.use_weighted_layer_sum = use_weighted_layer_sum
self.classifier_proj_size = classifier_proj_size
@property
def inputs_to_logits_ratio( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
 | 557 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
lowercase : List[Any] = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None ) -> argparse.ArgumentParser:
'''simple docstring'''
if subparsers is not None:
parser = subparsers.add_parser("tpu-config" , description=_description)
else:
parser = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description)
# Core arguments
config_args = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`.")
config_args.add_argument(
"--config_file" , type=_lowerCamelCase , default=_lowerCamelCase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_lowerCamelCase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_lowerCamelCase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
pod_args = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU.")
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_lowerCamelCase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it.")
if subparsers is not None:
parser.set_defaults(func=tpu_command_launcher)
return parser
def tpu_command_launcher(args ) -> None:
'''simple docstring'''
defaults = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(default_config_file ):
defaults = load_config_from_file(args.config_file)
if not args.command_file and defaults.command_file is not None and not args.command:
args.command_file = defaults.command_file
if not args.command and defaults.commands is not None:
args.command = defaults.commands
if not args.tpu_name:
args.tpu_name = defaults.tpu_name
if not args.tpu_zone:
args.tpu_zone = defaults.tpu_zone
if args.accelerate_version == "dev":
args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
args.accelerate_version = "accelerate -U"
elif isinstance(parse(args.accelerate_version) , Version):
args.accelerate_version = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod.")
if args.command_file:
with open(args.command_file , "r") as f:
args.command = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , list):
args.command = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
new_cmd = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
args.command = "; ".join(new_cmd)
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(cmd)}')
return
subprocess.run(cmd)
print("Successfully setup pod.")
def main() -> None:
'''simple docstring'''
parser = tpu_command_parser()
args = parser.parse_args()
tpu_command_launcher(args)
 | 557 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = 'src/transformers'
# Matches is_xxx_available()
_re_backend = re.compile(R'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(R'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(R'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
_re_try = re.compile(R'^\s*try:')
# Catches a line with else:
_re_else = re.compile(R'^\s*else:')
def find_backend(line ):
'''simple docstring'''
if _re_test_backend.search(line ) is None:
return None
backends = [b[0] for b in _re_backend.findall(line )]
backends.sort()
return "_and_".join(backends )
def parse_init(init_file ):
'''simple docstring'''
with open(init_file , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lines = f.readlines()
line_index = 0
while line_index < len(lines ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(lines ):
return None
# First grab the objects without a specific backend in _import_structure
objects = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
line = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(line ):
content = _re_one_line_import_struct.search(line ).groups()[0]
imports = re.findall(R'''\[([^\]]+)\]''' , content )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
single_line_import_search = _re_import_struct_key_value.search(line )
if single_line_import_search is not None:
imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(obj ) > 0]
objects.extend(imports )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
import_dict_objects = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
backend = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
backend = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
objects = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
line = lines[line_index]
if _re_import_struct_add_one.search(line ) is not None:
objects.append(_re_import_struct_add_one.search(line ).groups()[0] )
elif _re_import_struct_add_many.search(line ) is not None:
imports = _re_import_struct_add_many.search(line ).groups()[0].split(''', ''' )
imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
objects.extend(imports )
elif _re_between_brackets.search(line ) is not None:
imports = _re_between_brackets.search(line ).groups()[0].split(''', ''' )
imports = [obj[1:-1] for obj in imports if len(obj ) > 0]
objects.extend(imports )
elif _re_quote_object.search(line ) is not None:
objects.append(_re_quote_object.search(line ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
import_dict_objects[backend] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
objects = []
while (
line_index < len(lines )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
line = lines[line_index]
single_line_import_search = _re_import.search(line )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
type_hint_objects = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(lines ):
# If the line is an if is_backend_available, we grab all objects associated.
backend = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
backend = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
objects = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
line = lines[line_index]
single_line_import_search = _re_import.search(line )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
type_hint_objects[backend] = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects , type_hint_objects ):
'''simple docstring'''
def find_duplicates(seq ):
return [k for k, v in collections.Counter(seq ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
errors = []
for key in import_dict_objects.keys():
duplicate_imports = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'Duplicate _import_structure definitions for: {duplicate_imports}' )
duplicate_type_hints = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
name = '''base imports''' if key == '''none''' else F'{key} backend'
errors.append(F'Differences for {name}:' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F'  {a} in TYPE_HINT but not in _import_structure.' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F'  {a} in _import_structure but not in TYPE_HINT.' )
return errors
def check_all_inits():
'''simple docstring'''
failures = []
for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
fname = os.path.join(root , '''__init__.py''' )
objects = parse_init(fname )
if objects is not None:
errors = analyze_results(*objects )
if len(errors ) > 0:
errors[0] = F'Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'
failures.append('''\n'''.join(errors ) )
if len(failures ) > 0:
raise ValueError('''\n\n'''.join(failures ) )
def get_transformers_submodules():
'''simple docstring'''
submodules = []
for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(folder )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(path ) / folder).glob('''*.py''' ) ) ) == 0:
continue
short_path = str((Path(path ) / folder).relative_to(PATH_TO_TRANSFORMERS ) )
submodule = short_path.replace(os.path.sep , '''.''' )
submodules.append(submodule )
for fname in files:
if fname == "__init__.py":
continue
short_path = str((Path(path ) / fname).relative_to(PATH_TO_TRANSFORMERS ) )
submodule = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(submodule )
return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def check_submodules():
'''simple docstring'''
from transformers.utils import direct_transformers_import
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS )
import_structure_keys = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
# (potentially re-) add them.
with open(os.path.join(PATH_TO_TRANSFORMERS , '''__init__.py''' ) , '''r''' ) as f:
init_content = f.read()
import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , init_content ) ) )
module_not_registered = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(module_not_registered ) > 0:
list_of_modules = '''\n'''.join(F'- {module}' for module in module_not_registered )
raise ValueError(
'''The following submodules are not properly registered in the main init of Transformers:\n'''
F'{list_of_modules}\n'
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 343 |
'''simple docstring'''
def solution(length = 50 ):
'''simple docstring'''
different_colour_ways_number = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
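# Note on the recurrence (added interpretation): for each tile length
# (2, 3, 4 -> column tile_length - 2) the table counts ways to place one or
# more tiles of that single length in a row of grey squares. Fixing the start
# of a tile, the suffix of length row_length - tile_start - tile_length
# contributes its already-computed count, plus 1 for placing no further tile.
# The answer sums the three per-colour counts for the full row.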
if __name__ == "__main__":
print(f'''{solution() = }''')
| 343 | 1 |
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token , num_runs=7 ):
headers = None
if token is not None:
headers = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
workflow_id = """636036"""
url = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
result = requests.get(url , headers=headers ).json()
return result["workflow_runs"]
def get_last_daily_ci_runs(token ):
workflow_runs = get_daily_ci_runs(token )
workflow_run_id = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
workflow_run_id = workflow_run["""id"""]
break
return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names , output_dir , token ):
workflow_run_id = get_last_daily_ci_runs(token )
if workflow_run_id is not None:
artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id , token=token )  # "worflow" (sic) matches the helper's parameter name
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
artifact_url = artifacts_links[artifact_name]
download_artifact(
artifact_name=artifact_name , artifact_url=artifact_url , output_dir=output_dir , token=token )
def get_last_daily_ci_reports(artifact_names , output_dir , token ):
get_last_daily_ci_artifacts(artifact_names , output_dir , token )
results = {}
for artifact_name in artifact_names:
artifact_zip_path = os.path.join(output_dir , F"""{artifact_name}.zip""" )
if os.path.isfile(artifact_zip_path ):
results[artifact_name] = {}
with zipfile.ZipFile(artifact_zip_path ) as z:
for filename in z.namelist():
if not os.path.isdir(filename ):
# read the file
with z.open(filename ) as f:
results[artifact_name][filename] = f.read().decode('''UTF-8''' )
return results
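# Usage sketch (added; the artifact name and env variable are hypothetical):
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_all_tests_gpu_test_reports"],
#       output_dir="ci_reports",
#       token=os.environ["GITHUB_TOKEN"],
#   )
#   # reports maps artifact name -> {filename: decoded file contents}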
| 175 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class EfficientNetConfig(PretrainedConfig ):
model_type = "efficientnet"
def __init__( self , num_channels: int = 3 , image_size: int = 6_0_0 , width_coefficient: float = 2.0 , depth_coefficient: float = 3.1 , depth_divisor: int = 8 , kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels: List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , out_channels: List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , depthwise_padding: List[int] = [] , strides: List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio: float = 0.25 , hidden_act: str = "swish" , hidden_dim: int = 2_5_6_0 , pooling_type: str = "mean" , initializer_range: float = 0.02 , batch_norm_eps: float = 0.001 , batch_norm_momentum: float = 0.99 , dropout_rate: float = 0.5 , drop_connect_rate: float = 0.2 , **kwargs , ) -> None:
super().__init__(**kwargs )
self.num_channels = num_channels
self.image_size = image_size
self.width_coefficient = width_coefficient
self.depth_coefficient = depth_coefficient
self.depth_divisor = depth_divisor
self.kernel_sizes = kernel_sizes
self.in_channels = in_channels
self.out_channels = out_channels
self.depthwise_padding = depthwise_padding
self.strides = strides
self.num_block_repeats = num_block_repeats
self.expand_ratios = expand_ratios
self.squeeze_expansion_ratio = squeeze_expansion_ratio
self.hidden_act = hidden_act
self.hidden_dim = hidden_dim
self.pooling_type = pooling_type
self.initializer_range = initializer_range
self.batch_norm_eps = batch_norm_eps
self.batch_norm_momentum = batch_norm_momentum
self.dropout_rate = dropout_rate
self.drop_connect_rate = drop_connect_rate
self.num_hidden_layers = sum(num_block_repeats ) * 4
class EfficientNetOnnxConfig(OnnxConfig ):
torch_onnx_minimum_version = version.parse("1.11" )
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def atol_for_validation( self ) -> float:
return 1E-5
| 96 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xlm-roberta-xl""": """https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json""",
"""facebook/xlm-roberta-xxl""": """https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json""",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig ):
"""simple docstring"""
model_type = "xlm-roberta-xl"
def __init__( self , vocab_size=25_08_80 , hidden_size=25_60 , num_hidden_layers=36 , num_attention_heads=32 , intermediate_size=1_02_40 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_14 , type_vocab_size=1 , initializer_range=0.02 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
@property
def _a ( self : List[Any] ):
"""simple docstring"""
if self.task == "multiple-choice":
A ={0: "batch", 1: "choice", 2: "sequence"}
else:
A ={0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
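# Hedged sketch of how the dynamic ONNX axes switch with the task (class names as
# restored above; OnnxConfig is assumed to accept a `task` keyword):
#
#     onnx_config = XLMRobertaXLOnnxConfig(XLMRobertaXLConfig(), task="multiple-choice")
#     onnx_config.inputs["input_ids"]  # -> {0: "batch", 1: "choice", 2: "sequence"}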
| 700 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse denominations from largest to smallest (greedy choice)
    for denomination in reversed(denominations):
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)  # Append to the "answer" array

    return answer
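# Worked example: with the default denominations below, value 987 yields
# [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]. Note that the greedy strategy is only
# optimal for canonical coin systems such as INR; for denominations [1, 3, 4] and
# value 6 it returns [4, 1, 1] even though [3, 3] would be shorter.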
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 689 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Dict = logging.get_logger(__name__)
snake_case : Tuple = {
"google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 545 |
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift `number` left by `shift_amount` bits (logical shift)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, filling with zeros (logical shift)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift `number` right by `shift_amount` bits, replicating the sign bit (arithmetic shift)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
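# Expected outputs, hand-checked against the logic above:
#   logical_left_shift(20, 3)       -> "0b10100000"
#   logical_right_shift(20, 3)      -> "0b10"
#   arithmetic_right_shift(-20, 3)  -> "0b111101"   # 6-bit two's complement of -3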
if __name__ == "__main__":
import doctest
doctest.testmod()
| 400 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ])

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()

            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]])
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
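# Hedged note: HuggingFace gates the integration tests above behind @slow, so they
# only execute when the RUN_SLOW environment variable is set (test path assumed):
#   RUN_SLOW=1 python -m pytest tests/models/esm/test_modeling_esm.py -k IntegrationTest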
| 720 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm = True
        config.num_labels = 150
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "patch_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")

    return name
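# Worked example of the refinenet index flip above: "neck.refinenet1..." parses to
# layer_idx = 1, and abs(1 - 4) = 3, so it becomes "neck.fusion_stage.layers.3...";
# conversely "neck.refinenet4..." maps to "neck.fusion_stage.layers.0...".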
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")

    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1e-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model to hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
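# Hedged usage sketch (the script filename is assumed; the checkpoint URL is the
# default defined above):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-large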
| 183 | 0 |
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
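# For reference (hedged): the monolingual vocab file written in setUp is a plain
# "<token> <id>" pair per line, e.g.
#   ▁This 0
#   ▁is 1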
| 50 |
import unittest

import numpy as np
import requests

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

    from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False

if is_vision_available():
    from PIL import Image

    from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, size=None, do_normalize=True, do_convert_rgb=True, patch_size=None):
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1024, 2048, 4096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}

    def prepare_image_processor_dict(self):
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def prepare_dummy_image(self):
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11, reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.", )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_expected_patches(self):
        dummy_image = self.image_processor_tester.prepare_dummy_image()

        image_processor = self.image_processing_class(**self.image_processor_dict)
        max_patch = 2048

        inputs = image_processor(dummy_image, return_tensors="pt", max_patches=max_patch)
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean(), torch.tensor(0.0606), atol=1e-3, rtol=1e-3))
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def test_call_vqa(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input: in VQA mode, calling without header_text must raise
            with self.assertRaises(ValueError):
                encoded_images = image_processor(
                    image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches

            dummy_text = "Hello"

            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch, header_text=dummy_text).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch, header_text=dummy_text).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11, reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`.", )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_convert_rgb"))

    def test_call_pil_four_channels(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2

        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0], return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (1, max_patch, expected_hidden_dim), )

            # Test batched
            encoded_images = image_processor(
                image_inputs, return_tensors="pt", max_patches=max_patch).flattened_patches
            self.assertEqual(
                encoded_images.shape, (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim), )
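# Hedged note on the shape checks above: each flattened patch stores
# patch_height * patch_width * num_channels pixel values plus 2 extra slots for the
# patch's row and column index, hence the "+ 2" in expected_hidden_dim.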
| 292 | 0 |
import unittest

from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase = {'''input_ids''': [[11_073, 82_783, 18, 26, 82_783, 549, 51_540, 248, 17_209, 1_301, 217, 20, 215_186, 1_325, 147, 17_209, 1_301, 217, 20, 56_370, 53, 122_020, 20, 16_477, 27, 87_355, 4_548, 20, 4_728, 78_392, 17, 159_969, 18, 26, 24_491, 629, 15, 538, 22_704, 5_439, 15, 2_788, 24_491, 9_885, 15, 43_534, 605, 15, 814, 18_403, 33_200, 29, 15, 43_534, 24_458, 12_410, 111, 24_966, 83_669, 9_637, 144_068, 26, 850, 22_346, 27, 147, 24_966, 83_669, 83_490, 26, 39_113, 735, 27, 689, 656, 2_800, 1_339, 4_600, 53, 122_020, 115_785, 34, 816, 1_339, 46_887, 18, 147, 53_905, 1_951, 42_238, 41_170, 17_732, 834, 436, 15, 27_523, 98_733, 217, 147, 5_542, 4_981, 930, 17_347, 16, 2], [20_091, 629, 94, 82_786, 58, 490, 20, 1_528, 84, 53_905, 344, 80_592, 110_128, 18_822, 5_267, 1_306, 62, 152_537, 308, 7_997, 401, 124_427, 549, 35_442, 225, 109, 15_055, 25_748, 147, 7_119, 43_712, 34, 767, 135_366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63_784, 119_466, 17, 147_808, 88_214, 18, 656, 81, 32, 3_296, 10_280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e", )
| 714 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
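# Hedged note: with the _LazyModule installed in sys.modules above, a statement like
#   from transformers.models.table_transformer import TableTransformerModel
# resolves the attribute lazily, so torch is only imported on first access.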
| 358 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def _lowerCAmelCase ( _UpperCamelCase ):
"""simple docstring"""
if "model" in orig_key:
_lowercase: Union[str, Any] = orig_key.replace('''model.''' , '''''' )
if "norm1" in orig_key:
_lowercase: Optional[Any] = orig_key.replace('''norm1''' , '''attention.output.LayerNorm''' )
if "norm2" in orig_key:
_lowercase: Tuple = orig_key.replace('''norm2''' , '''output.LayerNorm''' )
if "norm" in orig_key:
_lowercase: Union[str, Any] = orig_key.replace('''norm''' , '''LayerNorm''' )
if "transformer" in orig_key:
_lowercase: Any = orig_key.split('''.''' )[0].split('''_''' )[-1]
_lowercase: Optional[int] = orig_key.replace(f'''transformer_{layer_num}''' , f'''encoder.layer.{layer_num}''' )
if "mha.attn" in orig_key:
_lowercase: Tuple = orig_key.replace('''mha.attn''' , '''attention.self''' )
if "mha" in orig_key:
_lowercase: Dict = orig_key.replace('''mha''' , '''attention''' )
if "W_q" in orig_key:
_lowercase: int = orig_key.replace('''W_q''' , '''self.query''' )
if "W_k" in orig_key:
_lowercase: str = orig_key.replace('''W_k''' , '''self.key''' )
if "W_v" in orig_key:
_lowercase: Tuple = orig_key.replace('''W_v''' , '''self.value''' )
if "ff1" in orig_key:
_lowercase: List[str] = orig_key.replace('''ff1''' , '''intermediate.dense''' )
if "ff2" in orig_key:
_lowercase: Any = orig_key.replace('''ff2''' , '''output.dense''' )
if "ff" in orig_key:
_lowercase: Any = orig_key.replace('''ff''' , '''output.dense''' )
if "mlm_class" in orig_key:
_lowercase: Union[str, Any] = orig_key.replace('''mlm.mlm_class''' , '''cls.predictions.decoder''' )
if "mlm" in orig_key:
_lowercase: Dict = orig_key.replace('''mlm''' , '''cls.predictions.transform''' )
if "cls" not in orig_key:
_lowercase: Any = '''yoso.''' + orig_key
return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
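# Hedged usage sketch (script and file names assumed):
#   python convert_yoso_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path ./yoso.ckpt --config_file ./config.json --pytorch_dump_path ./yoso-hf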
| 353 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
A__ : List[Any] = logging.get_logger(__name__)
class __magic_name__ ( SCREAMING_SNAKE_CASE__ ):
def __init__( self , *A_ , **A_ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , A_ , )
super().__init__(*A_ , **A_ )
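# Hedged sketch: constructing the shim emits a FutureWarning and otherwise behaves
# exactly like SegformerImageProcessor:
#
#     import warnings
#     with warnings.catch_warnings(record=True) as caught:
#         warnings.simplefilter("always")
#         SegformerFeatureExtractor()
#     assert issubclass(caught[0].category, FutureWarning)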
| 353 | 1 |
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
| 712 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_for_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], )
def _lowerCamelCase (self ) -> List[str]:
def check_hidden_states_output(_a , _a , _a ):
lowercase_ : Dict = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(_a , _a ) )
lowercase_ : Any = outputs.hidden_states
lowercase_ : str = self.model_tester.num_encoder_blocks
self.assertEqual(len(_a ) , _a )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
lowercase_ ,lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[Any] = True
check_hidden_states_output(_a , _a , _a )
def _lowerCamelCase (self ) -> Dict:
if not self.model_tester.is_training:
return
lowercase_ ,lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(_a ):
continue
lowercase_ : Optional[int] = model_class(_a )
model.to(_a )
model.train()
lowercase_ : List[Any] = self._prepare_for_class(_a , _a , return_labels=_a )
lowercase_ : str = model(**_a ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCamelCase (self ) -> str:
pass
@slow
def _lowerCamelCase (self ) -> Any:
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(torch_device)
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))
    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
            torch_device
        )
        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors='pt')
        pixel_values = encoded_inputs.pixel_values.to(torch_device)
        with torch.no_grad():
            outputs = model(pixel_values)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 438 | 0 |
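The integration tests above pin exact logits for the SegFormer checkpoints; for orientation, here is a minimal standalone inference sketch against the same public b0 checkpoint. The image URL is the standard COCO sample, and the shape comment is an assumption about the b0 variant, whose logits come out at 1/4 of the input resolution.

from PIL import Image
import requests
import torch
from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# logits come out at 1/4 resolution, i.e. (1, num_labels, 128, 128) for 512x512 inputs
print(outputs.logits.shape)

# upsample back to the original image size and take the per-pixel argmax
segmentation = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(segmentation.shape)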
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 76 |
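This file is the standard lazy-import shim used across transformers: `_import_structure` names what each submodule exports, type checkers follow the `TYPE_CHECKING` branch, and at runtime the module object is swapped for a `_LazyModule` so heavy submodules load only on first attribute access. A minimal sketch of the same pattern follows; `heavy_submodule` and `HeavyClass` are placeholder names, and importing `_LazyModule` from `transformers.utils` is an assumption about where the helper is reachable.

import sys
from typing import TYPE_CHECKING

from transformers.utils import _LazyModule

_import_structure = {"heavy_submodule": ["HeavyClass"]}

if TYPE_CHECKING:
    from .heavy_submodule import HeavyClass  # static type checkers see the real import
else:
    # at runtime, attribute access on this module triggers the actual import
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)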
"""simple docstring"""
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(
                vert_product, horz_product, lr_diag_product, rl_diag_product
            )
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
| 645 | 0 |
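A quick hand-checkable sanity test for `largest_product`: on the 4x4 grid below the only non-zero run of four adjacent cells is the main diagonal, so the answer must be 2 * 4 * 6 * 8 = 384.

demo_grid = [
    [2, 0, 0, 0],
    [0, 4, 0, 0],
    [0, 0, 6, 0],
    [0, 0, 0, 8],
]
assert largest_product(demo_grid) == 384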
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files" , [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
] , )
def test_dataset_infos_dict_from_directory(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write("{\"default\": {\"dataset_size\": 42}}")
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info" , [
DatasetInfo(),
DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1_337,
        post_processing_size=442,
        dataset_size=1_234,
        size_in_bytes=1_337 + 442 + 1_234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict" , [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo" , features=Features({"a": Value("int32" )} ) , builder_name="builder" , config_name="config" , version="1.0.0" , splits=[{"name": "train"}] , download_size=42 , )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=42 ),
"v2": DatasetInfo(dataset_size=1_337 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes precedence over the config_name attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
| 702 |
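The same dump-and-reload round trip outside pytest, as a minimal sketch using only the `DatasetInfo` methods exercised in the tests above:

import tempfile

from datasets.info import DatasetInfo

with tempfile.TemporaryDirectory() as tmp_dir:
    info = DatasetInfo(description="demo", dataset_size=42)
    info.write_to_directory(tmp_dir)
    reloaded = DatasetInfo.from_directory(tmp_dir)
    assert reloaded.dataset_size == 42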
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_position = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_position
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
| 26 | 0 |
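For cross-checking, here is a compact Prim's variant built on the standard-library `heapq` that consumes the same `adjacency_list` format (vertex -> [[neighbor, weight], ...]) and returns tree edges in the same (parent, child) form. It is a lazy-deletion sketch, not the indexed-heap approach above, so equal-weight graphs may yield a different but equally valid spanning tree.

import heapq


def prims_with_heapq(adjacency_list):
    tree_edges = []
    visited = {0}
    candidates = [(weight, 0, neighbor) for neighbor, weight in adjacency_list[0]]
    heapq.heapify(candidates)
    while candidates and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(candidates)
        if v in visited:
            continue  # stale entry, already connected via a cheaper edge
        visited.add(v)
        tree_edges.append((u, v))
        for neighbor, w in adjacency_list[v]:
            if neighbor not in visited:
                heapq.heappush(candidates, (w, v, neighbor))
    return tree_edges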
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000) -> np.ndarray:
    """Randomly sample chunks of `max_length` seconds from the input audio."""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
@dataclass
class _A :
"""simple docstring"""
UpperCamelCase_ : Optional[str] = field(default=_UpperCAmelCase , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
UpperCamelCase_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
UpperCamelCase_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
UpperCamelCase_ : str = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
UpperCamelCase_ : str = field(
default='''validation''' , metadata={
'''help''': (
'''The name of the training data set split to use (via the datasets library). Defaults to \'validation\''''
)
} , )
UpperCamelCase_ : str = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
UpperCamelCase_ : str = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
UpperCamelCase_ : Optional[int] = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase_ : Optional[int] = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase_ : float = field(
default=2_0 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class _A :
"""simple docstring"""
UpperCamelCase_ : str = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
UpperCamelCase_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
UpperCamelCase_ : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase_ : Optional[str] = field(
default=_UpperCAmelCase , metadata={'''help''': '''Name or path of preprocessor config.'''} )
UpperCamelCase_ : bool = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
UpperCamelCase_ : bool = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
UpperCamelCase_ : bool = field(
default=_UpperCAmelCase , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCamelCase_ : Optional[bool] = field(
default=_UpperCAmelCase , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
UpperCamelCase_ : bool = field(
default=_UpperCAmelCase , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def lowercase ( self : int ) -> Tuple:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''will be removed in a future version. Use `--freeze_feature_encoder`'''
'''instead. Setting `freeze_feature_encoder==True`.''' , FutureWarning , )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'''The argument `--freeze_feature_extractor` is deprecated and '''
'''should not be used in combination with `--freeze_feature_encoder`.'''
'''Only make use of `--freeze_feature_encoder`.''' )
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__snake_case , __snake_case , __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
__snake_case , __snake_case , __snake_case = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_audio_classification''', snake_case, snake_case)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout)], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__snake_case = training_args.get_process_log_level()
logger.setLevel(snake_case)
transformers.utils.logging.set_verbosity(snake_case)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
 + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Detecting last checkpoint.
__snake_case = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
__snake_case = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to train from scratch.''')
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''')
# Initialize our dataset and prepare it for the audio classification task.
__snake_case = DatasetDict()
__snake_case = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None, )
__snake_case = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None, )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--audio_column_name` to the correct audio column - one of '''
f"{', '.join(raw_datasets['train'].column_names)}.")
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
'''Make sure to set `--label_column_name` to the correct text column - one of '''
f"{', '.join(raw_datasets['train'].column_names)}.")
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
__snake_case = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
__snake_case = raw_datasets.cast_column(
data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate))
__snake_case = feature_extractor.model_input_names[0]
def train_transforms(snake_case):
__snake_case = []
for audio in batch[data_args.audio_column_name]:
__snake_case = random_subsample(
audio['''array'''], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate)
subsampled_wavs.append(snake_case)
__snake_case = feature_extractor(snake_case, sampling_rate=feature_extractor.sampling_rate)
__snake_case = {model_input_name: inputs.get(snake_case)}
__snake_case = list(batch[data_args.label_column_name])
return output_batch
def val_transforms(snake_case):
__snake_case = [audio['''array'''] for audio in batch[data_args.audio_column_name]]
__snake_case = feature_extractor(snake_case, sampling_rate=feature_extractor.sampling_rate)
__snake_case = {model_input_name: inputs.get(snake_case)}
__snake_case = list(batch[data_args.label_column_name])
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__snake_case = raw_datasets['''train'''].features[data_args.label_column_name].names
__snake_case , __snake_case = {}, {}
for i, label in enumerate(snake_case):
__snake_case = str(snake_case)
__snake_case = label
# Load the accuracy metric from the datasets package
__snake_case = evaluate.load('''accuracy''')
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case):
__snake_case = np.argmax(eval_pred.predictions, axis=1)
return metric.compute(predictions=snake_case, references=eval_pred.label_ids)
__snake_case = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path, num_labels=len(snake_case), labelaid=snake_case, idalabel=snake_case, finetuning_task='''audio-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
__snake_case = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path), config=snake_case, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
__snake_case = (
raw_datasets['''train'''].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case, output_all_columns=snake_case)
if training_args.do_eval:
if data_args.max_eval_samples is not None:
__snake_case = (
raw_datasets['''eval'''].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case, output_all_columns=snake_case)
# Initialize our trainer
__snake_case = Trainer(
model=snake_case, args=snake_case, train_dataset=raw_datasets['''train'''] if training_args.do_train else None, eval_dataset=raw_datasets['''eval'''] if training_args.do_eval else None, compute_metrics=snake_case, tokenizer=snake_case, )
# Training
if training_args.do_train:
__snake_case = None
if training_args.resume_from_checkpoint is not None:
__snake_case = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__snake_case = last_checkpoint
__snake_case = trainer.train(resume_from_checkpoint=snake_case)
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics)
trainer.save_metrics('''train''', train_result.metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
__snake_case = trainer.evaluate()
trainer.log_metrics('''eval''', snake_case)
trainer.save_metrics('''eval''', snake_case)
# Write model card and (optionally) push to hub
__snake_case = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''audio-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''audio-classification'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case)
else:
trainer.create_model_card(**snake_case)
if __name__ == "__main__":
    main()
| 564 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_blenderbot_small": [
"BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlenderbotSmallConfig",
"BlenderbotSmallOnnxConfig",
],
"tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
"BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlenderbotSmallForCausalLM",
"BlenderbotSmallForConditionalGeneration",
"BlenderbotSmallModel",
"BlenderbotSmallPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
"TFBlenderbotSmallForConditionalGeneration",
"TFBlenderbotSmallModel",
"TFBlenderbotSmallPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
"FlaxBlenderbotSmallForConditionalGeneration",
"FlaxBlenderbotSmallModel",
"FlaxBlenderbotSmallPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 564 | 1 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """
    Returns True if n is pentagonal, False otherwise.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """
    Returns the minimal difference of a pair of pentagonal numbers P_j and P_k
    whose sum and difference are both pentagonal.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
| 270 |
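The `is_pentagonal` test inverts P(n) = n(3n - 1)/2: solving 3n^2 - n - 2x = 0 gives n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal exactly when that n is a whole number. A quick check against the sequence 1, 5, 12, 22, ...:

for n in range(1, 10):
    assert is_pentagonal(n * (3 * n - 1) // 2)
assert not is_pentagonal(6)  # 6 is not pentagonal (the sequence runs 1, 5, 12, 22, ...)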
'''simple docstring'''
def is_even(number: int) -> bool:
    """
    Return True if the input integer is even, using a bitwise check of the
    lowest bit.
    """
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 270 | 1 |
'''simple docstring'''
from timeit import timeit
test_data = {
"""MALAYALAM""": True,
"""String""": False,
"""rotor""": True,
"""level""": True,
"""A""": True,
"""BB""": True,
"""ABC""": False,
"""amanaplanacanalpanama""": True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(f'''{key:21} {value}''')
print("""a man a plan a canal panama""")
# finished 500,000 runs in 0.46793 seconds
benchmark_function("""is_palindrome_slice""")
# finished 500,000 runs in 0.85234 seconds
benchmark_function("""is_palindrome""")
# finished 500,000 runs in 1.32028 seconds
benchmark_function("""is_palindrome_recursive""")
# finished 500,000 runs in 2.08679 seconds
benchmark_function("""is_palindrome_traversal""")
| 92 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """
    Calculates beta = v/c, the given velocity as a fraction of the speed of light.
    """
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """
    Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta^2).
    """
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """
    Calculate the Lorentz transformation matrix for movement in the x direction.
    """
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """
    Calculate a Lorentz transformation for movement in the x direction given a
    velocity and a four-vector for an inertial reference frame.  If no event is
    given, it defaults to the symbolic four vector (ct, x, y, z).
    """
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 87 | 0 |
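A numeric sanity check for the functions above: at v = 0.8c the Lorentz factor is 1/sqrt(1 - 0.64) = 5/3, and boosting the event (ct, x, y, z) = (c * 1s, 0, 0, 0) should give (gamma * c, -gamma * beta * c, 0, 0). Note that `transform` scales `event[0]` by c in place.

import numpy as np

v = 0.8 * c
assert abs(gamma(v) - 5 / 3) < 1e-9

event = np.array([1.0, 0.0, 0.0, 0.0])  # one second at the spatial origin
boosted = transform(v, event)
assert abs(boosted[0] / (gamma(v) * c) - 1) < 1e-12
assert abs(boosted[1] / (-gamma(v) * beta(v) * c) - 1) < 1e-12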
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


# This is the linear search that will occur after the search space has become smaller.
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in the list. Returns -1 if the element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative method of the ternary search algorithm."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive method of the ternary search algorithm."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
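A quick non-interactive check; with `precision = 10` both variants fall through to `lin_search` on a short list, so the expected indices are easy to verify by eye.

data = [1, 3, 5, 7, 9, 11]
assert ite_ternary_search(data, 7) == 3
assert rec_ternary_search(0, len(data) - 1, data, 7) == 3
assert ite_ternary_search(data, 4) == -1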
| 246 |
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            'The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use BeitImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 246 | 1 |
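The file above is the usual transformers deprecation shim: the old class subclasses its replacement and only adds a `FutureWarning` at construction time. A generic sketch of the pattern with placeholder names (`OldExtractor` and `NewProcessor` are illustrative, not real transformers classes):

import warnings


class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size


class OldExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)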
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    """Return the list of undirected graph bridges [(a1, b1), ..., (ak, bk)], ai <= bi."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
| 117 |
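Running the detector on demo graph 0: vertices 3 and 4 hang off the {0, 1, 2} triangle through vertex 2, and the {5, 6, 7, 8} cycle connects back only through the edge 2-5, so the bridges should be exactly (2, 3), (3, 4) and (2, 5).

bridges = compute_bridges(get_demo_graph(0))
assert sorted(bridges) == [(2, 3), (2, 5), (3, 4)]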
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Copy/paste/tweak a ParlAI checkpoint's weights into the HF Blenderbot structure."""
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 117 | 1 |
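A quick offline check of the key-renaming rules, independent of any checkpoint; the second key is a made-up but representative ParlAI-style name.

assert rename_state_dict_key("embeddings.weight") == "shared.weight"
assert (
    rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
    == "encoder.layers.0.self_attn.q_proj.weight"
)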
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """
    Return a set of products of primes, one per unique prime partition of
    number_to_partition (each partition is encoded as the product of its prime
    parts, which is unique by the fundamental theorem of arithmetic).
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """
    Return the smallest integer that can be written as the sum of primes in
    over number_unique_partitions different ways.
    """
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F"{solution() = }")
| 456 |
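Because every multiset of primes has a unique product, `len(partition(x))` equals the number of prime partitions of x. Below is an independent cross-check with the classic coin-change DP over the same `primes` set; Project Euler's own example says 10 has exactly five prime partitions (2+2+2+2+2, 2+2+3+3, 2+3+5, 3+7, 5+5).

def prime_partition_count(n: int) -> int:
    ways = [1] + [0] * n  # ways[v] = number of prime partitions of v
    for p in sorted(primes):
        for value in range(p, n + 1):
            ways[value] += ways[value - p]
    return ways[n]


assert prime_partition_count(10) == 5
assert prime_partition_count(10) == len(partition(10))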
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self, vocab_size=30522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12,
        num_attention_heads=12, num_candidates=8, intermediate_size=3072, hidden_act="gelu_new",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256,
        max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320,
        num_block_records=13353718, searcher_beam_size=5000, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 456 | 1 |
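A minimal usage sketch: default construction and a dict round trip via the `PretrainedConfig` base-class helpers (`to_dict` and `from_dict` come from the base class, not from this file).

config = RealmConfig()
assert config.model_type == "realm"
assert config.hidden_size == 768

restored = RealmConfig.from_dict(config.to_dict())
assert restored.retriever_proj_size == config.retriever_proj_size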
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_informer": [
"INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"InformerForPrediction",
"InformerModel",
"InformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332 | 1 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
_lowerCamelCase = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)
@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: int
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :str , __A :argparse.ArgumentParser , __A :argparse.ArgumentParser ) -> List[str]:
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
SCREAMING_SNAKE_CASE__ = {k: v for k, v in vars(__A ).items() if k != """container"""}
SCREAMING_SNAKE_CASE__ = {k: v for k, v in vars(__A ).items() if k != """container"""}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , __A ) and yy.get("""choices""" , __A ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](__A ) , yy["""type"""](__A ) )
del xx["type"], yy["type"]
self.assertEqual(__A , __A )
def _snake_case ( self :int ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__A , required=__A )
expected.add_argument("""--bar""" , type=__A , required=__A )
expected.add_argument("""--baz""" , type=__A , required=__A )
expected.add_argument("""--flag""" , type=__A , default=__A , const=__A , nargs="""?""" )
self.argparsersEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = ["""--foo""", """1""", """--baz""", """quux""", """--bar""", """0.5"""]
((SCREAMING_SNAKE_CASE__) , ) = parser.parse_args_into_dataclasses(__A , look_for_args_file=__A )
self.assertFalse(example.flag )
def _snake_case ( self :Tuple ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=__A )
expected.add_argument("""--baz""" , default="""toto""" , type=__A , help="""help message""" )
self.argparsersEqual(__A , __A )
def _snake_case ( self :List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__A , default=__A , const=__A , nargs="""?""" )
expected.add_argument("""--baz""" , type=__A , default=__A , const=__A , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=__A , dest="""baz""" )
expected.add_argument("""--opt""" , type=__A , default=__A )
SCREAMING_SNAKE_CASE__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
self.argparsersEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = parser.parse_args([] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
SCREAMING_SNAKE_CASE__ = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
SCREAMING_SNAKE_CASE__ = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
SCREAMING_SNAKE_CASE__ = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
SCREAMING_SNAKE_CASE__ = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(__A , Namespace(foo=__A , baz=__A , opt=__A ) )
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
SCREAMING_SNAKE_CASE__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
SCREAMING_SNAKE_CASE__ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
SCREAMING_SNAKE_CASE__ = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
SCREAMING_SNAKE_CASE__ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
SCREAMING_SNAKE_CASE__ = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _snake_case ( self :int ) -> Any:
"""simple docstring"""
@dataclass
class UpperCamelCase_ :
lowerCamelCase_ = "toto"
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
SCREAMING_SNAKE_CASE__ = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
SCREAMING_SNAKE_CASE__ = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def _snake_case ( self :Any ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=__A )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=__A )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=__A )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=__A )
self.argparsersEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = parser.parse_args([] )
self.assertEqual(
__A , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
SCREAMING_SNAKE_CASE__ = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(__A , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=__A , type=__A )
expected.add_argument("""--bar""" , default=__A , type=__A , help="""help message""" )
expected.add_argument("""--baz""" , default=__A , type=__A )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=__A )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=__A )
SCREAMING_SNAKE_CASE__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__A )
for dataclass_type in dataclass_types:
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
self.argparsersEqual(__A , __A )
SCREAMING_SNAKE_CASE__ = parser.parse_args([] )
self.assertEqual(__A , Namespace(foo=__A , bar=__A , baz=__A , ces=[] , des=[] ) )
SCREAMING_SNAKE_CASE__ = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(__A , Namespace(foo=12 , bar=3.1_4 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def _snake_case ( self :Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=__A , required=__A )
expected.add_argument("""--required_str""" , type=__A , required=__A )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=__A , )
self.argparsersEqual(__A , __A )
def _snake_case ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=__A , required=__A )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=__A , )
expected.add_argument("""--opt""" , type=__A , default=__A )
expected.add_argument("""--baz""" , default="""toto""" , type=__A , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=__A )
self.argparsersEqual(__A , __A )
def _snake_case ( self :int ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
SCREAMING_SNAKE_CASE__ = parser.parse_dict(__A )[0]
SCREAMING_SNAKE_CASE__ = BasicExample(**__A )
self.assertEqual(__A , __A )
def _snake_case ( self :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(__A , parser.parse_dict , __A , allow_extra_keys=__A )
def _snake_case ( self :str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(__A , """temp_json""" )
os.mkdir(__A )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(__A , __A )
SCREAMING_SNAKE_CASE__ = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0]
SCREAMING_SNAKE_CASE__ = BasicExample(**__A )
self.assertEqual(__A , __A )
def _snake_case ( self :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
SCREAMING_SNAKE_CASE__ = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ = os.path.join(__A , """temp_yaml""" )
os.mkdir(__A )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(__A , __A )
SCREAMING_SNAKE_CASE__ = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
SCREAMING_SNAKE_CASE__ = BasicExample(**__A )
self.assertEqual(__A , __A )
def _snake_case ( self :str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = HfArgumentParser(__A )
self.assertIsNotNone(__A ) | 59 |
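The row above drives HfArgumentParser from dataclasses. A minimal sketch of the core pattern under test, assuming the transformers library is installed (the Args dataclass here is illustrative, not one of the fixtures above):

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class Args:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


(args,) = HfArgumentParser(Args).parse_args_into_dataclasses(["--foo", "1"])
assert args.foo == 1 and args.baz == "toto"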
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = inspect.getfile(accelerate.test_utils )
SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
SCREAMING_SNAKE_CASE__ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
SCREAMING_SNAKE_CASE__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def _snake_case ( self :Optional[Any] ) -> Tuple:
"""simple docstring"""
print(f'''Found {torch.cuda.device_count()} devices.''' )
SCREAMING_SNAKE_CASE__ = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
@require_multi_gpu
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
print(f'''Found {torch.cuda.device_count()} devices.''' )
SCREAMING_SNAKE_CASE__ = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
@require_multi_gpu
def _snake_case ( self :Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__A , env=os.environ.copy() )
@require_multi_gpu
def _snake_case ( self :Optional[int] ) -> str:
"""simple docstring"""
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
SCREAMING_SNAKE_CASE__ = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
execute_subprocess_async(__A , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)
    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg) | 59 | 1 |
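The script above validates accelerator.pad_across_processes across ranks. A single-process sketch of the same zero-padding semantics in plain torch (not Accelerate's implementation):

import torch


def pad_to_size(tensor: torch.Tensor, size: int, pad_first: bool = False) -> torch.Tensor:
    # Pad dim 0 up to `size` with zeros, either at the front or at the end.
    pad_len = size - tensor.shape[0]
    if pad_len <= 0:
        return tensor
    padding = torch.zeros((pad_len, *tensor.shape[1:]), dtype=tensor.dtype)
    return torch.cat((padding, tensor) if pad_first else (tensor, padding), dim=0)


t = torch.randint(1, 10, (3, 10))  # strictly positive so the zero padding is detectable
padded = pad_to_size(t, 5)
assert padded.shape[0] == 5 and torch.all(padded[3:] == 0)
padded_first = pad_to_size(t, 5, pad_first=True)
assert torch.equal(padded_first[2:], t) and torch.all(padded_first[:2] == 0)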
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 14 |
'''simple docstring'''
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 119 | 0 |
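A compact restatement of the brute-force Caesar decryption above, returning all 26 candidates instead of printing them:

import string


def caesar_candidates(message: str) -> list:
    candidates = []
    for key in range(len(string.ascii_uppercase)):
        shifted = "".join(
            string.ascii_uppercase[(string.ascii_uppercase.index(c) - key) % 26]
            if c in string.ascii_uppercase
            else c
            for c in message
        )
        candidates.append(shifted)
    return candidates


assert caesar_candidates("KHOOR")[3] == "HELLO"  # key 3 recovers the plaintext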
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    """Unconditional image generation with the Karras et al. (2022) stochastic sampler."""

    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 703 |
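The loop above is an Euler step with a second-order (Heun-style) correction. A toy numpy sketch of just that update rule, with a made-up denoiser standing in for the UNet (illustrative only, not the diffusers scheduler):

import numpy as np


def denoise(x, sigma):  # hypothetical denoiser, assumed for illustration
    return x / (1.0 + sigma)


def heun_step(x, sigma, sigma_prev):
    d = (x - denoise(x, sigma)) / sigma                  # dx/dsigma at sigma
    x_prev = x + (sigma_prev - sigma) * d                # Euler step
    if sigma_prev != 0:
        d_prev = (x_prev - denoise(x_prev, sigma_prev)) / sigma_prev
        x_prev = x + (sigma_prev - sigma) * 0.5 * (d + d_prev)  # 2nd-order correction
    return x_prev


x = heun_step(np.ones(4), sigma=1.0, sigma_prev=0.5)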
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 570 | 0 |
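The module above defers heavy imports with _LazyModule. PEP 562's module-level __getattr__ (Python 3.7+) gives the same effect in plain Python; a generic toy version with an illustrative attribute map:

import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module (illustrative)


def __getattr__(name):
    # Import the providing module only when the attribute is first accessed.
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")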
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Serialize prints from concurrent processes by flock-ing this script file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
| 353 |
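A single-process smoke test of the same init/all_reduce/barrier sequence, using the CPU "gloo" backend so it runs without GPUs (illustrative; the script above requires "nccl" and CUDA):

import os

import torch
import torch.distributed as dist

os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)
t = torch.ones(1)
dist.all_reduce(t, op=dist.ReduceOp.SUM)
assert t.item() == 1.0  # world_size == 1, so the sum is unchanged
dist.barrier()
dist.destroy_process_group()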
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class LmSeqsDataset(Dataset):
    """Wrapper around token-id sequences: checks, filters and splits them."""

    def __init__(self, params, data):
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
            init_size = len(self)
            unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
            indices = (unk_occs / self.lengths) < 0.5
            self.token_ids = self.token_ids[indices]
            self.lengths = self.lengths[indices]
            new_size = len(self)
            logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process logs."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
| 353 | 1 |
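The heart of remove_long_sequences above is chunking plus boundary-token repair. A small pure-Python sketch of that step (cls_id and sep_id are placeholder ids):

def split_with_boundaries(seq, max_len, cls_id=0, sep_id=1):
    # Strip the existing boundary tokens, chunk the body, and re-attach boundaries.
    body = seq[1:-1]
    inner = max_len - 2
    chunks = [body[i : i + inner] for i in range(0, len(body), inner)]
    return [[cls_id, *c, sep_id] for c in chunks]


out = split_with_boundaries([0, 5, 6, 7, 8, 9, 1], max_len=5)
assert all(c[0] == 0 and c[-1] == 1 and len(c) <= 5 for c in out)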
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"
TO_HIGHLIGHT = [
"""TextEncoderConfig""",
"""ByteTextEncoder""",
"""SubwordTextEncoder""",
"""encoder_config""",
"""maybe_build_from_corpus""",
"""manual_dir""",
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R"""tfds\.core""", R"""datasets"""),
(R"""tf\.io\.gfile\.GFile""", R"""open"""),
(R"""tf\.([\w\d]+)""", R"""datasets.Value(\'\1\')"""),
(R"""tfds\.features\.Text\(\)""", R"""datasets.Value(\'string\')"""),
(R"""tfds\.features\.Text\(""", R"""datasets.Value(\'string\'),"""),
(R"""features\s*=\s*tfds.features.FeaturesDict\(""", R"""features=datasets.Features("""),
(R"""tfds\.features\.FeaturesDict\(""", R"""dict("""),
(R"""The TensorFlow Datasets Authors""", R"""The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"""),
(R"""tfds\.""", R"""datasets."""),
(R"""dl_manager\.manual_dir""", R"""self.config.data_dir"""),
(R"""self\.builder_config""", R"""self.config"""),
]
def convert_command_factory(args: Namespace):
    """Factory used to instantiate the command from the parsed CLI arguments."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command with the datasets-cli argument parser."""
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")

        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory

    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forget anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
| 713 |
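The converter works by applying the ordered TO_CONVERT regex table line by line; a minimal reproduction of that mechanism (patterns abbreviated from the table above; ordering matters, specific rules first):

import re

RULES = [
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"tfds\.", r"datasets."),
]


def convert_line(line: str) -> str:
    for pattern, replacement in RULES:
        line = re.sub(pattern, replacement, line)
    return line


assert convert_line("x = tfds.features.Text()") == "x = datasets.Value('string')"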
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Ascii85 bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode Ascii85 bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 117 | 0 |
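Round-trip check for the Ascii85 helpers above, using the standard-library base64 module directly:

import base64

encoded = base64.a85encode("some text".encode("utf-8"))
assert base64.a85decode(encoded).decode("utf-8") == "some text"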
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 394 |
'''simple docstring'''
class SubArray:
    def __init__(self, arr: str) -> None:
        # we need a list, not a string, so split the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        # Kadane-style scan: sum_value[i] is the best sum ending at i,
        # rear[i] is the best sum seen anywhere up to i.
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 394 | 1 |
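solve_sub_array above is Kadane's algorithm written with two arrays; the usual single-pass form for comparison:

def max_subarray_sum(nums: list) -> int:
    best = current = nums[0]
    for x in nums[1:]:
        current = max(x, current + x)  # extend the run or start over at x
        best = max(best, current)
    return best


assert max_subarray_sum([1, -2, 4, -1, 2]) == 5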
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")

    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA."""
)
parser.add_argument(
"""--reset_position_index_per_cell""",
default=False,
action="""store_true""",
help="""Whether to use relative position embeddings or not. Defaults to True.""",
)
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--tapas_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained TAPAS model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 709 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=51865,
        num_mel_bins=80,
        encoder_layers=6,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_attention_heads=4,
        decoder_ffn_dim=1536,
        encoder_ffn_dim=1536,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        decoder_start_token_id=50257,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=256,
        dropout=0.0,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        scale_embedding=False,
        max_source_positions=1500,
        max_target_positions=448,
        pad_token_id=50256,
        bos_token_id=50256,
        eos_token_id=50256,
        suppress_tokens=None,
        begin_suppress_tokens=[220, 50256],
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        apply_spec_augment=False,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        median_filter_width=7,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions

        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            **kwargs,
        )
class WhisperOnnxConfig(OnnxSeqaSeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework=None,
        sampling_rate: int = 22050,
        time_duration: float = 5.0,
        frequency: int = 220,
    ) -> Mapping[str, Any]:
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self,
            preprocessor=preprocessor.feature_extractor,
            batch_size=batch_size,
            framework=framework,
            sampling_rate=sampling_rate,
            time_duration=time_duration,
            frequency=frequency,
        )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length

        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework
        )

        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")

        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")

        return dummy_inputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
| 445 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        # Coefficients are ordered lowest degree first: coefficients[i] * x^i.
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
| 116 |
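Quick sanity checks for the Polynomial class above (coefficients are ordered lowest degree first; assumes this runs in the same module as the class):

p = Polynomial(1, [1, 1])   # 1 + x
q = Polynomial(1, [-1, 1])  # -1 + x
assert (p * q).coefficients == [-1, 0, 1]  # x^2 - 1
assert p.integral().derivative() == p      # differentiation undoes integration
assert p.evaluate(2) == 3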
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 440 | 0 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys()) | 718 |
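A minimal construction of the 4-row, single-column dataset shape these tests assert on (assumes the datasets library is installed):

from datasets import Dataset

ds = Dataset.from_dict({"text": ["a", "b", "c", "d"]})
assert ds.num_rows == 4 and ds.column_names == ["text"]
assert ds.features["text"].dtype == "string"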
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)
if __name__ == "__main__":
main() | 182 | 0 |
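Usage check for the cipher above: encrypting and then decrypting with the same key round-trips (only ASCII letters are shifted; other symbols pass through and do not advance the key):

secret = encrypt_message("LEMON", "ATTACK AT DAWN")
assert secret == "LXFOPV EF RNHR"
assert decrypt_message("LEMON", secret) == "ATTACK AT DAWN"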
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }

        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 131 |
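The fixed `noise` vector threaded through the tests above pins down ViTMAE's per-patch random masking. A standalone numpy sketch of that mechanism, simplified to one sample (the real model ranks noise per batch element inside the forward pass):

import numpy as np

def random_masking(num_patches: int, mask_ratio: float, noise: np.ndarray):
    len_keep = int(num_patches * (1 - mask_ratio))
    ids_shuffle = np.argsort(noise)        # ascending: lowest-noise patches are kept
    ids_restore = np.argsort(ids_shuffle)  # inverse permutation
    mask = np.ones(num_patches)
    mask[:len_keep] = 0                    # 0 = keep, 1 = masked
    return mask[ids_restore], ids_restore  # unshuffled back to patch order

mask, ids_restore = random_masking(16, 0.75, np.random.uniform(size=16))
print(int(mask.sum()))  # 12 of 16 patches masked at ratio 0.75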
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split()

joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf'^({joined_dirs}).*?\.py$')

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
| 575 | 0 |
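One hedged way to consume the script from Python rather than a Makefile; it prints a single space-separated line of paths with no trailing newline, so `split()` is enough (the path below assumes it is run from the repo root):

import subprocess

modified = (
    subprocess.check_output(["python", "utils/get_modified_files.py", "utils", "src", "tests", "examples"])
    .decode("utf-8")
    .split()
)
print(modified)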
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
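Hypothetical usage of the config class above: `attribute_map` lets generic code read model sizes through the canonical names while the CTRL-specific ones stay primary.

from transformers import CTRLConfig

config = CTRLConfig(n_layer=2, n_head=4)
print(config.num_hidden_layers)  # 2, routed to n_layer via attribute_map
print(config.hidden_size)        # 1280, routed to n_embd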
| 702 | """simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 668 | 0 |
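The reader above backs the public entry point; day-to-day code usually reaches it through `load_dataset` (the file name below is a placeholder):

from datasets import load_dataset

dataset_dict = load_dataset("text", data_files={"train": "my_file.txt"})
print(dataset_dict["train"].features)  # {'text': Value(dtype='string', id=None)}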
from math import pi
def arc_length(angle: float, radius: float) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
| 73 |
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 344 | 0 |
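A minimal sketch of the lazy-import idea behind `_LazyModule` (the real transformers class carries extra machinery such as `__dir__` and pickling support; this is illustrative only):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only when the attribute is first accessed.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")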
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ''' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'''
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, '''schedulers/'''))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, '''src/diffusers/schedulers/scheduling_ddpm.py'''),
            os.path.join(self.diffusers_dir, '''schedulers/scheduling_ddpm.py'''),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = '''src/diffusers'''
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, '''new_code.py''')
        with open(fname, '''w''', newline='''\n''') as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, '''r''') as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''')
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''', '''DDPMSchedulerOutput''', REFERENCE_CODE + '''\n''', )
        # With no empty line at the end
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''', '''DDPMSchedulerOutput''', REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''', '''TestSchedulerOutput''', re.sub('''DDPM''', '''Test''', REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = '''TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason'''
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""", F"""{long_class_name}SchedulerOutput""", re.sub('''Bert''', long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            '''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''', '''TestSchedulerOutput''', REFERENCE_CODE, overwrite_result=re.sub('''DDPM''', '''Test''', REFERENCE_CODE), )
| 721 |
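For context, a hypothetical target of the checker exercised above; the marker line is what the copy check matches, and the body must equal the referenced class with every `DDPM` renamed to `Test` (the imports and the `BaseOutput` base class are assumptions here, not taken from the test file):

from dataclasses import dataclass
from typing import Optional

import torch
from diffusers.utils import BaseOutput

# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test
@dataclass
class TestSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None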
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_import_structure = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speecht5"] = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speecht5"] = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 623 | 0 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)
    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self):
        train_args = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(train_args)

        eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(eval_args)

        entropy_eval_args = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(entropy_eval_args) | 518 |
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("""numbers must be an iterable of integers""")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod | 518 | 1 |
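Quick checks of the function above (name taken from the cleaned-up code):

assert max_product_subarray([2, 3, -2, 4]) == 6   # best subarray is [2, 3]
assert max_product_subarray([-2, 0, -1]) == 0     # zero beats any negative product
assert max_product_subarray([-2, -3, 4]) == 24    # (-2 * -3) * 4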
def join(separator: str, separated: list[str]) -> str:
    joined = """"
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("""join() accepts only strings to be joined""")
        joined += word_or_phrase + separator
    return joined.strip(separator)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 515 |
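Behavior checks for the join() above, assuming the cleaned-up signature (separator first):

assert join("", ["a", "b", "c", "d"]) == "abcd"
assert join("#", ["a", "b", "c", "d"]) == "a#b#c#d"
assert join(" ", ["You", "are", "amazing!"]) == "You are amazing!"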
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 515 | 1 |