code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Module-level logger (Transformers convention).
logger = logging.get_logger(__name__)

# The fast tokenizer is fully described by a single `tokenizer.json` file.
# NOTE: these names are read by the tokenizer class below as VOCAB_FILES_NAMES /
# PRETRAINED_VOCAB_FILES_MAP, so they must be bound to exactly those identifiers.
VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

# Map from public BLOOM checkpoint id to its hosted tokenizer file.
PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class lowerCAmelCase(PreTrainedTokenizerFast):
    """
    Fast (Rust/tokenizers-backed) tokenizer for BLOOM models.

    There is no slow Python counterpart; the tokenizer is fully defined by a
    `tokenizer.json` file (byte-level BPE).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    # No slow tokenizer class exists for BLOOM.
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ) -> None:
        """Load the backend tokenizer and force its `add_prefix_space` setting to match the argument."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        # The serialized pre-tokenizer may disagree with the requested
        # `add_prefix_space`; if so, rebuild it with the requested value.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Reject pre-tokenized input unless the tokenizer was built with add_prefix_space=True."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        """Single-sequence counterpart of `_batch_encode_plus`, with the same guard."""
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Flatten a Conversation into ids, appending EOS after every turn and
        keeping only the trailing `model_max_length` tokens."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 306
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase(unittest.TestCase):
    """Integration test for the TF CamemBERT base model."""

    @slow
    def test_output_embeds_base_model(self) -> None:
        # Renamed to test_* so unittest actually collects it.
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,  # was the non-existent `tf.intaa`
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]

        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,  # was the non-existent `tf.floataa`
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 306
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the MobileNetV2 sub-package: maps submodule name to
# the public names it exports.  Read by _LazyModule at the bottom of the file.
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision-only exports.
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch-only exports.
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; module names match the
    # `_import_structure` keys above.
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 306
|
def snake_case_(a: str, b: str) -> bool:
    """
    Return True if ``b`` is a valid abbreviation of ``a``.

    ``b`` matches when some subset of lowercase letters of ``a`` can be
    uppercased to produce ``b`` and all remaining (unmatched) letters of ``a``
    are lowercase (and get deleted).

    >>> snake_case_("daBcd", "ABC")
    True
    >>> snake_case_("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: first i chars of `a` can produce first j chars of `b`.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Uppercase a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] (only allowed if it is lowercase).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 306
| 1
|
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
# Generic item type used by the Node and Stack classes below (Generic[T]).
T = TypeVar("T")
class Node(Generic[T]):
    """A singly linked node holding one stack item.

    Named ``Node`` because the stack's push() constructs ``Node(...)`` and its
    annotations reference ``Node[T]``.
    """

    def __init__(self, data: T) -> None:
        self.data = data  # payload
        self.next: Node[T] | None = None  # link to the node below in the stack

    def __str__(self) -> str:
        return f"{self.data}"
class lowerCAmelCase(Generic[T]):
    """A LIFO stack backed by a singly linked list of Node objects."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        # Yields items from top of stack to bottom.
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        """Place a new item on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        """Remove and return the top item; raises IndexError when empty."""
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)  # type narrowing for the line below
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        """Return the top item without removing it; raises IndexError when empty."""
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        """Drop every item from the stack."""
        self.top = None


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 306
|
from scipy.stats import spearmanr
import datasets
lowerCamelCase : List[str] = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCamelCase : List[str] = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCamelCase : Union[str, Any] = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCAmelCase(datasets.Metric):
    """Spearman rank-order correlation metric backed by scipy.stats.spearmanr."""

    def _info(self):
        # `_info` is the hook name datasets.Metric expects.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return {"spearmanr": rho} and, when requested, the p-value as well."""
        # scipy returns a (correlation, pvalue) result; index it positionally.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 306
| 1
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
# Module-level logger for this image-processor module (Transformers convention);
# not referenced in the visible code below.
lowerCamelCase : Tuple = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (left, top, right, bottom) pixel box to the 0-1000
    coordinate system used by LayoutLM-style models.

    Named ``normalize_box`` because `apply_tesseract` below calls it by that name.
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Run Tesseract OCR on a document image.

    Returns ``(words, normalized_boxes)`` where boxes are in the 0-1000
    coordinate system produced by `normalize_box`.
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    # (a set makes the `idx not in ...` membership tests O(1))
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class lowerCAmelCase(BaseImageProcessor):
    """Image processor for LayoutLM-style document models.

    Resizes document images, flips RGB->BGR for the Detectron2 backbone, and
    optionally runs Tesseract OCR to extract words and normalized boxes.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to exactly ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Prepare one image or a batch for the model; per-call arguments
        override the defaults configured in ``__init__``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            # Attach the OCR results so downstream tokenization can use them.
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 306
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if ``pattern`` occurs in ``text``.

    Named ``kmp`` because the self-tests at the bottom of the file call it by
    that name.
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    """Compute the KMP failure (longest proper prefix-suffix) array for ``pattern``."""
    failure = [0]
    i = 0  # length of the current matched prefix
    j = 1  # position being processed
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # fall back to the previous candidate prefix and retry this j
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
# Test 1)
lowerCamelCase : Dict = '''abc1abc12'''
lowerCamelCase : Union[str, Any] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
lowerCamelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowerCamelCase : List[Any] = '''ABABX'''
lowerCamelCase : List[Any] = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
lowerCamelCase : int = '''AAAB'''
lowerCamelCase : Optional[int] = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
lowerCamelCase : Optional[Any] = '''abcdabcy'''
lowerCamelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
lowerCamelCase : Dict = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 306
| 1
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-import structure: read by _LazyModule below, so it must be named
# `_import_structure`; the import path matches the declared submodule key.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer loads on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 306
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger for this file (Transformers convention); unused in the
# visible code below.
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)

# PIL is optional; only import it when the vision extra is installed.
if is_vision_available():
    import PIL
class lowerCAmelCase(BaseImageProcessor):
    """CLIP-style image processor.

    Pipeline (each stage individually switchable): convert to RGB -> resize the
    shortest edge -> center crop -> rescale -> normalize -> channel-first.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        # shortest-edge resizing must not be coerced into a square
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to exactly ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> List[str]:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Standardize the image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Prepare one image or a batch for the model; per-call arguments
        override the defaults configured in ``__init__``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 306
| 1
|
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Deterministic 6k±1 trial-division primality test.

    Named ``is_prime`` because the odd-composite sieve below calls it by that name.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
# All odd composite numbers below 100001; read as `odd_composites` by compute_nums.
odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]
def compute_nums(n: int) -> list[int]:
    """Return the first ``n`` odd composites that are NOT expressible as
    prime + 2*i*i (Goldbach's other conjecture, Project Euler 46)."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            # A prime remainder means the conjecture holds for this composite.
            if is_prime(rem):
                break
            i += 1
        else:
            # No decomposition found: this composite is a counterexample.
            list_nums.append(odd_composites[num])
            if len(list_nums) == n:
                return list_nums

    return []
def solution() -> int:
    """Return the smallest odd composite that cannot be written as
    prime + 2*square (Project Euler 46)."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 306
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last ``occurrence`` occurrences of ``old`` with ``new`` in ``s``.

    Named ``rreplace`` because upgrade_state_dict below calls it by that name.
    """
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    """Sum all parameter values in a state dict, used as a cheap checksum when
    comparing the converted model to the original."""
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    """Rename DALL-E encoder state-dict keys into the FLAVA image codebook layout
    and cast every tensor to float32."""
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                # nest each block under an explicit `.group.` submodule
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        # `.w` / `.b` suffixes become standard `.weight` / `.bias`
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Convert a DALL-E image codebook checkpoint into a FlavaImageCodebook.

    ``checkpoint_path`` may be a local path or a URL.  The converted weights are
    either saved to ``pytorch_dump_folder_path`` or returned directly.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    # Some checkpoints store the whole Encoder module rather than a state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Sanity check: parameter checksums must agree between source and converted model.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    # CLI wiring for the DALL-E -> FLAVA codebook conversion.
    # NOTE(review): the assignment targets below were mangled to placeholders
    # (presumably `parser` and `args`), and `convert_dalle_checkpoint` is not
    # the visible name of the converter defined above — confirm against the
    # original script.
    lowerCamelCase : Dict = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    lowerCamelCase : Union[str, Any] = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 306
| 1
|
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make generation deterministic so the pixel-slice assertions below are stable.
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase ):
    """Fast (CPU-sized) smoke tests for the PNDM diffusion pipeline.

    NOTE(review): local assignments in the methods were mangled to
    ``__lowercase`` placeholders while later lines read the presumably-original
    names (``model``, ``pndm``, ``image``, ...) — confirm against the original
    test file.
    """
    @property
    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        """Build a tiny seeded UNet2D model to drive the pipeline."""
        torch.manual_seed(0 )
        __lowercase : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
        return model
    def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
        """Run the pipeline for 20 steps with and without ``return_dict`` and
        check the output shape plus a reference pixel slice for both paths."""
        __lowercase : Optional[Any] = self.dummy_uncond_unet
        __lowercase : Any = PNDMScheduler()
        __lowercase : Dict = PNDMPipeline(unet=__a , scheduler=__a )
        pndm.to(__a )
        pndm.set_progress_bar_config(disable=__a )
        # Same seed for both calls so the two output paths must agree.
        __lowercase : List[str] = torch.manual_seed(0 )
        __lowercase : List[str] = pndm(generator=__a , num_inference_steps=20 , output_type="""numpy""" ).images
        __lowercase : Tuple = torch.manual_seed(0 )
        __lowercase : Union[str, Any] = pndm(generator=__a , num_inference_steps=20 , output_type="""numpy""" , return_dict=__a )[0]
        __lowercase : Any = image[0, -3:, -3:, -1]
        __lowercase : str = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowercase : Optional[int] = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: run the full PNDM pipeline on google/ddpm-cifar10-32."""
    def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        """Generate one sample with the pretrained CIFAR-10 UNet and compare a
        pixel slice against reference values.

        NOTE(review): local assignments were mangled to ``__lowercase``
        placeholders; later lines read ``pndm`` / ``image`` — confirm against
        the original test file.
        """
        __lowercase : List[str] = """google/ddpm-cifar10-32"""
        __lowercase : str = UNetaDModel.from_pretrained(__a )
        __lowercase : int = PNDMScheduler()
        __lowercase : Optional[int] = PNDMPipeline(unet=__a , scheduler=__a )
        pndm.to(__a )
        pndm.set_progress_bar_config(disable=__a )
        __lowercase : Optional[int] = torch.manual_seed(0 )
        __lowercase : List[str] = pndm(generator=__a , output_type="""numpy""" ).images
        __lowercase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowercase : int = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 306
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Module logger plus verbose output so conversion progress is visible.
# NOTE(review): the assignment target was mangled — the converter below logs
# via `logger`; confirm against the original script.
lowerCamelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
    """Port an old-structure (XLM)ProphetNet checkpoint into the current
    `transformers` layout and save it to `pytorch_dump_folder_path`.

    For every key reported missing by `from_pretrained`, walks the dotted
    attribute path in parallel on the old and new models (renaming attributes
    via `mapping`) and copies the weight/bias over, splitting fused
    `in_proj_weight`/`in_proj_bias` into separate q/k/v projections.

    NOTE(review): local assignments in this function were mangled to
    ``__lowercase`` placeholders while later lines read the original names
    (``prophet``, ``prophet_old``, ``loading_info``, ``mapping``,
    ``special_keys``, ``model``, ``old_model``, ...) — confirm against the
    original conversion script before running.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        __lowercase : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
        __lowercase , __lowercase : int = XLMProphetNetForConditionalGeneration.from_pretrained(
            lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
    else:
        __lowercase : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
        __lowercase , __lowercase : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
            lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
    # Keys whose fused qkv projection must be split into separate tensors.
    __lowercase : List[str] = ["""key_proj""", """value_proj""", """query_proj"""]
    # Old attribute name -> new attribute name along the dotted key path.
    __lowercase : Optional[int] = {
        """self_attn""": """ngram_self_attn""",
        """cross_attn""": """encoder_attn""",
        """cross_attn_layer_norm""": """encoder_attn_layer_norm""",
        """feed_forward_layer_norm""": """final_layer_norm""",
        """feed_forward""": """""",
        """intermediate""": """fc1""",
        """output""": """fc2""",
        """key_proj""": """k_proj""",
        """query_proj""": """q_proj""",
        """value_proj""": """v_proj""",
        """word_embeddings""": """embed_tokens""",
        """embeddings_layer_norm""": """emb_layer_norm""",
        """relative_pos_embeddings""": """relative_linear""",
        """ngram_embeddings""": """ngram_input_embed""",
        """position_embeddings""": """embed_positions""",
    }
    for key in loading_info["missing_keys"]:
        __lowercase : Tuple = key.split(""".""" )
        # lm_head lives on the top-level model; everything else under the base model.
        if attributes[0] == "lm_head":
            __lowercase : str = prophet
            __lowercase : List[str] = prophet_old
        else:
            __lowercase : Tuple = prophet.prophetnet
            __lowercase : Union[str, Any] = prophet_old.model
        __lowercase : Optional[Any] = False
        for attribute in attributes:
            if attribute in mapping:
                __lowercase : Optional[int] = mapping[attribute]
            if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0:
                __lowercase : str = attribute
            elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
                __lowercase : List[Any] = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                __lowercase : Any = old_model.weight
                logger.info(F"{attribute} is initialized." )
                __lowercase : Any = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                __lowercase : Dict = old_model.bias
                logger.info(F"{attribute} is initialized" )
                __lowercase : int = True
                break
            elif attribute in special_keys and hasattr(lowerCAmelCase_ , """in_proj_weight""" ):
                # Fused qkv projection: each of q/k/v gets one third of the rows.
                __lowercase : Dict = old_model.in_proj_weight.shape[0] // 3
                __lowercase : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                # NOTE(review): the two lines below are bare comparison
                # expressions, not `assert` statements — each builds a
                # (bool, str) tuple that is immediately discarded, so these
                # shape checks never actually run. They were most likely
                # intended to be asserts.
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    __lowercase : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    __lowercase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    __lowercase : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    __lowercase : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    __lowercase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    __lowercase : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                __lowercase : int = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                __lowercase : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                __lowercase : int = True
                break
            # Descend one level along the dotted path on both models.
            if attribute.isdigit():
                __lowercase : Tuple = model[int(lowerCAmelCase_ )]
                __lowercase : int = old_model[int(lowerCAmelCase_ )]
            else:
                __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                if old_attribute == "":
                    __lowercase : int = old_model
                else:
                    if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
                        raise ValueError(F"{old_model} does not have {old_attribute}" )
                    __lowercase : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
        if not is_key_init:
            raise ValueError(F"{key} was not correctly initialized!" )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    prophet.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    # CLI wiring for the ProphetNet checkpoint conversion.
    # NOTE(review): the assignment targets were mangled (presumably `parser`
    # and `args`), and `convert_prophetnet_checkpoint_to_pytorch` is not the
    # visible name of the converter above — confirm against the original.
    lowerCamelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCamelCase : Any = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
| 1
|
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
# Batch-size constants for the MRPC example.
# NOTE(review): names were mangled to `lowerCamelCase` placeholders — in the
# original accelerate example these are MAX_GPU_BATCH_SIZE and EVAL_BATCH_SIZE.
lowerCamelCase : Optional[int] = 16
lowerCamelCase : Any = 32
def snake_case_ ( lowerCAmelCase_ : Accelerator , lowerCAmelCase_ : int = 16 , lowerCAmelCase_ : str = "bert-base-cased" ):
    """Build train/eval DataLoaders for GLUE MRPC, tokenized with the given
    model's tokenizer and padded per-batch (or to max_length on TPU).

    NOTE(review): local assignments were mangled to ``__lowercase``
    placeholders; later lines read the original names (``tokenizer``,
    ``datasets``, ``tokenized_datasets``, ``accelerator``) — confirm against
    the original accelerate example.
    """
    __lowercase : int = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
    __lowercase : Dict = load_dataset("""glue""" , """mrpc""" )
    def tokenize_function(lowerCAmelCase_ : List[str] ):
        # max_length=None => use the model max length (it's actually the default)
        __lowercase : str = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    __lowercase : Any = datasets.map(
        lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCAmelCase_ )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    __lowercase : str = tokenized_datasets.rename_column("""label""" , """labels""" )
    def collate_fn(lowerCAmelCase_ : Optional[Any] ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(lowerCAmelCase_ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" )
        return tokenizer.pad(lowerCAmelCase_ , padding="""longest""" , return_tensors="""pt""" )
    # Instantiate dataloaders.
    __lowercase : int = DataLoader(
        tokenized_datasets["""train"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
    __lowercase : str = DataLoader(
        tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
    return train_dataloader, eval_dataloader
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple ):
    """Fine-tune a sequence-classification model on MRPC with accelerate,
    supporting both plain and DeepSpeed-managed optimizer/scheduler, track the
    best eval accuracy per epoch, and dump the metrics to JSON.

    NOTE(review): local assignments were mangled to ``__lowercase``
    placeholders; later lines read the original names (``accelerator``,
    ``model``, ``optimizer``, ``lr_scheduler``, ``eval_metric``,
    ``best_performance``, ...) — confirm against the original accelerate
    example.
    """
    # Initialize accelerator
    __lowercase : str = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    __lowercase : List[str] = config["""lr"""]
    __lowercase : List[str] = int(config["""num_epochs"""] )
    __lowercase : List[str] = int(config["""seed"""] )
    __lowercase : Union[str, Any] = int(config["""batch_size"""] )
    __lowercase : Optional[Any] = args.model_name_or_path
    set_seed(lowerCAmelCase_ )
    __lowercase , __lowercase : Tuple = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    __lowercase : int = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
    # Instantiate optimizer
    # When DeepSpeed provides its own optimizer config, a DummyOptim stands in.
    __lowercase : Optional[int] = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    __lowercase : int = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase_ )
    if accelerator.state.deepspeed_plugin is not None:
        __lowercase : Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        __lowercase : Optional[int] = 1
    __lowercase : int = (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        __lowercase : Dict = get_linear_schedule_with_warmup(
            optimizer=lowerCAmelCase_ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase_ , )
    else:
        __lowercase : Tuple = DummyScheduler(lowerCAmelCase_ , total_num_steps=lowerCAmelCase_ , warmup_num_steps=0 )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    __lowercase , __lowercase , __lowercase , __lowercase , __lowercase : List[str] = accelerator.prepare(
        lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # We need to keep track of how many total steps we have iterated over
    __lowercase : Optional[int] = 0
    # We also need to keep track of the stating epoch so files are named properly
    __lowercase : Dict = 0
    # Now we train the model
    __lowercase : Union[str, Any] = evaluate.load("""glue""" , """mrpc""" )
    __lowercase : int = 0
    __lowercase : Optional[int] = {}
    for epoch in range(lowerCAmelCase_ , lowerCAmelCase_ ):
        model.train()
        for step, batch in enumerate(lowerCAmelCase_ ):
            __lowercase : str = model(**lowerCAmelCase_ )
            __lowercase : List[str] = outputs.loss
            __lowercase : Optional[Any] = loss / gradient_accumulation_steps
            accelerator.backward(lowerCAmelCase_ )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1
        model.eval()
        __lowercase : Optional[int] = 0
        for step, batch in enumerate(lowerCAmelCase_ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                __lowercase : Dict = model(**lowerCAmelCase_ )
            __lowercase : Union[str, Any] = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            __lowercase , __lowercase : str = accelerator.gather(
                (predictions, batch["""labels"""]) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(lowerCAmelCase_ ) - 1:
                    __lowercase : Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    __lowercase : List[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
        __lowercase : str = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:" , lowerCAmelCase_ )
        __lowercase : Optional[int] = eval_metric["""accuracy"""]
        if best_performance < eval_metric["accuracy"]:
            __lowercase : Optional[Any] = eval_metric["""accuracy"""]
    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"
    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f:
            json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( ):
    """Parse command-line arguments and launch `training_function`.

    (Fix: the assignment targets and several argparse keyword values were
    mangled to placeholders — ``parser``/``args``/``config`` and the
    ``type``/``default``/``required`` values are restored here; the help/
    description strings are unchanged.)
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" )
    parser.add_argument(
        """--model_name_or_path""" ,
        type=str ,
        default="""bert-base-cased""" ,
        help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,
        required=False ,
    )
    parser.add_argument(
        """--output_dir""" ,
        type=str ,
        default=""".""" ,
        help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" ,
    )
    parser.add_argument(
        """--performance_lower_bound""" ,
        type=float ,
        default=None ,
        help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" ,
    )
    parser.add_argument(
        """--num_epochs""" ,
        type=int ,
        default=3 ,
        help="""Number of train epochs.""" ,
    )
    args = parser.parse_args()
    # Fixed hyper-parameters for the example run.
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config , args )
# Script entry point.
if __name__ == "__main__":
    main()
| 306
|
def snake_case_ ( pence : int = 200 ) -> int:
    """Count the ways to make ``pence`` pence from standard UK coins
    (Project Euler #31).

    Classic bottom-up coin-change DP: for each coin, extend the number of
    ways for every amount from ``coin`` up to ``pence``.

    (Fix: the checked-in copy had the ``coins``/``number_of_ways`` assignments
    and the loop start mangled to placeholders; they are restored here.)
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        # Amounts below `coin` cannot use this coin.
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    # Known Project Euler #31 answer for 200 pence.
    assert solution(2_00) == 7_36_82
| 306
| 1
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def snake_case_ ( key , default=False ):
    """Read a boolean flag from environment variable ``key``.

    Returns ``default`` when the variable is unset; otherwise converts the
    value via ``strtobool`` (accepting yes/no/true/false/1/0) and raises
    ``ValueError`` for anything else.

    (Fix: the parameter names and the ``value``/``_value`` assignments were
    mangled to placeholders; they are restored here.)
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"If set, {key} must be yes or no." )
    return _value
# Global switch: slow tests run only when RUN_SLOW is set truthy in the env.
# NOTE(review): the assignment target was mangled — the decorator below reads
# `_run_slow_tests`; confirm against the original test utilities.
lowerCamelCase : Optional[int] = parse_flag_from_env('''RUN_SLOW''', default=False)
def snake_case_ ( lowerCAmelCase_ : Optional[Any] ):
    # Decorator: unconditionally skip the test.
    return unittest.skip("""Test was skipped""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Any ):
    # Decorator: run only when slow tests are enabled (RUN_SLOW).
    return unittest.skipUnless(_run_slow_tests , """test is slow""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Dict ):
    # Decorator: run only on CPU-only machines (skip when CUDA is available).
    return unittest.skipUnless(not torch.cuda.is_available() , """test requires only a CPU""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : int ):
    # Decorator: require a CUDA GPU.
    return unittest.skipUnless(torch.cuda.is_available() , """test requires a GPU""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
    # Decorator: require an Intel XPU device.
    return unittest.skipUnless(is_xpu_available() , """test requires a XPU""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Any ):
    # Decorator: require torch's Apple `mps` backend.
    return unittest.skipUnless(is_mps_available() , """test requires a `mps` backend support in `torch`""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Any ):
    # Decorator: require both transformers and datasets to be installed.
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , """test requires the Hugging Face suite""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Any ):
    # Decorator: require bitsandbytes.
    return unittest.skipUnless(is_bnb_available() , """test requires the bitsandbytes library""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Optional[int] ):
    # Decorator: require a TPU.
    return unittest.skipUnless(is_tpu_available() , """test requires TPU""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Dict ):
    # Decorator: require exactly one CUDA GPU.
    return unittest.skipUnless(torch.cuda.device_count() == 1 , """test requires a GPU""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Optional[int] ):
    # Decorator: require exactly one XPU.
    return unittest.skipUnless(torch.xpu.device_count() == 1 , """test requires a XPU""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Tuple ):
    # Decorator: require more than one CUDA GPU.
    return unittest.skipUnless(torch.cuda.device_count() > 1 , """test requires multiple GPUs""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ):
    # Decorator: require more than one XPU.
    return unittest.skipUnless(torch.xpu.device_count() > 1 , """test requires multiple XPUs""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Optional[Any] ):
    # Decorator: require safetensors.
    return unittest.skipUnless(is_safetensors_available() , """test requires safetensors""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : int ):
    # Decorator: require DeepSpeed.
    return unittest.skipUnless(is_deepspeed_available() , """test requires DeepSpeed""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Any ):
    # Decorator: require torch >= 1.12.0.
    return unittest.skipUnless(is_torch_version(""">=""" , """1.12.0""" ) , """test requires torch version >= 1.12.0""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[Any]=None ):
    # Decorator factory: require torch >= `version`; usable bare or parameterized.
    if test_case is None:
        return partial(lowerCAmelCase_ , version=lowerCAmelCase_ )
    return unittest.skipUnless(is_torch_version(""">=""" , lowerCAmelCase_ ) , F"test requires torch version >= {version}" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Tuple ):
    # Decorator: require TensorBoard.
    return unittest.skipUnless(is_tensorboard_available() , """test requires Tensorboard""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : str ):
    # Decorator: require wandb.
    return unittest.skipUnless(is_wandb_available() , """test requires wandb""" )(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : List[str] ):
    # Decorator: require comet_ml.
    return unittest.skipUnless(is_comet_ml_available() , """test requires comet_ml""" )(lowerCAmelCase_ )
# True when at least one of wandb/tensorboard is installed and comet_ml is not
# (comet_ml hijacks the other trackers when present).
lowerCamelCase : Tuple = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def snake_case_ ( lowerCAmelCase_ : Tuple ):
    # Decorator: require at least one tracker and no comet_ml.
    return unittest.skipUnless(
        _atleast_one_tracker_available , """test requires at least one tracker to be available and for `comet_ml` to not be installed""" , )(lowerCAmelCase_ )
class lowerCAmelCase ( unittest.TestCase ):
    """TestCase backed by a class-level temporary directory, optionally wiped
    before every test and removed when the class finishes.

    NOTE(review): the class attribute and setup assignment were mangled —
    methods below read ``self.clear_on_setup`` and ``cls.tmpdir``; confirm
    against the original test utilities.
    """
    # Whether to empty the tmpdir before each test (read as `clear_on_setup`).
    _A : Dict = True
    @classmethod
    def lowerCAmelCase ( cls : str ) -> Any:
        """Create the shared temporary directory for the whole class."""
        __lowercase : Tuple = tempfile.mkdtemp()
    @classmethod
    def lowerCAmelCase ( cls : int ) -> Optional[Any]:
        """Remove the shared temporary directory after the class finishes."""
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
        """Empty the temporary directory before each test when enabled."""
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("""**/*""" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(__a )
class lowerCAmelCase ( unittest.TestCase ):
    """TestCase that resets the accelerate state singletons after every test so
    state does not leak between tests."""
    def lowerCAmelCase ( self : Any ) -> List[Any]:
        """Run the normal teardown, then reset the global state singletons."""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class lowerCAmelCase ( unittest.TestCase ):
    """TestCase that starts a set of mocks and registers their stop as cleanup."""
    def lowerCAmelCase ( self : int , __a : Union[mock.Mock, List[mock.Mock]] ) -> List[Any]:
        """Normalize `mocks` to a list, start each, and stop them on cleanup.

        NOTE(review): the assignment target was mangled — the loop reads
        ``self.mocks``; confirm against the original test utilities.
        """
        __lowercase : Union[str, Any] = mocks if isinstance(__a , (tuple, list) ) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop )
def snake_case_ ( lowerCAmelCase_ : Optional[int] ):
    """Return True if the tensor holds the same values on every distributed
    process: gather it across processes and compare each gathered copy to the
    local one.

    NOTE(review): local assignments were mangled to ``__lowercase``
    placeholders; later lines read ``state``, ``tensors`` and ``tensor`` —
    confirm against the original helper.
    """
    __lowercase : Optional[Any] = AcceleratorState()
    __lowercase : List[str] = tensor[None].clone().to(state.device )
    __lowercase : int = gather(lowerCAmelCase_ ).cpu()
    __lowercase : Optional[int] = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , lowerCAmelCase_ ):
            return False
    return True
class lowerCAmelCase :
    """Result of a finished subprocess: return code plus captured stdout/stderr lines."""
    def __init__( self , returncode , stdout , stderr ) -> None:
        # (Fix: the checked-in copy declared all three parameters with the same
        # mangled name `__a` — a SyntaxError — and assigned them to `__lowercase`
        # placeholders, so the object never carried its data. Restored here.)
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def snake_case_ ( stream , callback ):
    """Forward every line from the asyncio `stream` to `callback` until EOF.

    `readline` returns an empty bytes object at EOF, which terminates the loop.

    (Fix: the line read was assigned to a mangled ``__lowercase`` placeholder
    while the condition reads ``line``; the name is restored here.)
    """
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Optional[int]=False ):
    """Run `cmd` as an asyncio subprocess, streaming stdout/stderr line by line,
    and return a _RunOutput with the return code and captured output lines.

    NOTE(review): local assignments were mangled to ``__lowercase``
    placeholders; later lines read the original names (``p``, ``out``,
    ``err``) — confirm against the original helper.
    """
    if echo:
        print("""\nRunning: """ , """ """.join(lowerCAmelCase_ ) )
    __lowercase : List[Any] = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=lowerCAmelCase_ , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCAmelCase_ , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    __lowercase : Optional[Any] = []
    __lowercase : Any = []
    def tee(lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple="" ):
        # Decode one raw line, collect it into `sink`, and optionally mirror it
        # to the console with a stdout:/stderr: label.
        __lowercase : Optional[int] = line.decode("""utf-8""" ).rstrip()
        sink.append(lowerCAmelCase_ )
        if not quiet:
            print(lowerCAmelCase_ , lowerCAmelCase_ , file=lowerCAmelCase_ )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda lowerCAmelCase_ : tee(lowerCAmelCase_ , lowerCAmelCase_ , sys.stdout , label="""stdout:""" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda lowerCAmelCase_ : tee(lowerCAmelCase_ , lowerCAmelCase_ , sys.stderr , label="""stderr:""" ) ) ),
        ] , timeout=lowerCAmelCase_ , )
    return _RunOutput(await p.wait() , lowerCAmelCase_ , lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str=None , lowerCAmelCase_ : Dict=None , lowerCAmelCase_ : Dict=180 , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : List[str]=True ):
    """Synchronous wrapper around `_stream_subprocess`: run the command to
    completion and raise RuntimeError (with the collected stderr) on a
    non-zero return code.

    NOTE(review): local assignments were mangled to ``__lowercase``
    placeholders; later lines read ``loop``, ``result``, ``cmd_str`` and
    ``stderr`` — confirm against the original helper.
    """
    __lowercase : Optional[Any] = asyncio.get_event_loop()
    __lowercase : Optional[Any] = loop.run_until_complete(
        _stream_subprocess(lowerCAmelCase_ , env=lowerCAmelCase_ , stdin=lowerCAmelCase_ , timeout=lowerCAmelCase_ , quiet=lowerCAmelCase_ , echo=lowerCAmelCase_ ) )
    __lowercase : Optional[Any] = """ """.join(lowerCAmelCase_ )
    if result.returncode > 0:
        __lowercase : Optional[int] = """\n""".join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    return result
class lowerCAmelCase ( __a ):
    """Raised when a subprocess launched by `run_command` exits with an error.

    NOTE(review): the base class was mangled to ``__a`` — presumably
    ``Exception``; confirm against the original test utilities.
    """
    pass
def snake_case_ ( command , return_stdout=False ):
    """Run `command` (a list of argv tokens), optionally returning its stdout
    decoded as UTF-8; wrap failures in SubprocessCallException including the
    subprocess's combined output.

    (Fix: the captured output was assigned to a mangled ``__lowercase``
    placeholder while later lines read ``output``; the name is restored here.)
    """
    try:
        # stderr is folded into stdout so error text survives into e.output.
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , """decode""" ):
                output = output.decode("""utf-8""" )
        return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(command )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 306
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
    """Helper that builds tiny TimmBackbone configs and inputs for the tests.

    NOTE(review): local assignments in the methods were mangled to
    ``__lowercase`` placeholders while later lines read the presumably-original
    names (``config``, ``pixel_values``, ``model``, ``result``) — confirm
    against the original tester.
    """
    def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any:
        """Store the test hyper-parameters on the tester instance."""
        __lowercase : Optional[int] = parent
        # Default to the last stage when no explicit out_indices are given.
        __lowercase : List[str] = out_indices if out_indices is not None else [4]
        __lowercase : Optional[int] = stage_names
        __lowercase : Any = out_features
        __lowercase : Optional[Any] = backbone
        __lowercase : Optional[Any] = batch_size
        __lowercase : Union[str, Any] = image_size
        __lowercase : List[str] = num_channels
        __lowercase : str = use_pretrained_backbone
        __lowercase : str = is_training
    def lowerCAmelCase ( self : Dict ) -> Tuple:
        """Build a random pixel-values tensor plus a matching config."""
        __lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase : str = self.get_config()
        return config, pixel_values
    def lowerCAmelCase ( self : int ) -> str:
        """Build a TimmBackboneConfig from the stored hyper-parameters."""
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict:
        """Run the backbone in eval mode and check the last feature-map shape."""
        __lowercase : Dict = TimmBackbone(config=__a )
        model.to(__a )
        model.eval()
        with torch.no_grad():
            __lowercase : Optional[Any] = model(__a )
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def lowerCAmelCase ( self : Any ) -> int:
        """Return (config, inputs_dict) in the shape the common tests expect."""
        __lowercase : Union[str, Any] = self.prepare_config_and_inputs()
        __lowercase , __lowercase : str = config_and_inputs
        __lowercase : List[str] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
    '''Model tests for TimmBackbone.

    NOTE(review): this file is machine-mangled — locals are all bound to
    `__lowercase` and then read back under their original names (`model`,
    `result`, `outputs`, …), the base classes are all `__a`, and the class
    attributes below all share the name `_A` (each rebinding shadows the
    previous one). The code as written cannot run; comments describe the
    apparent upstream intent.
    '''
    # Presumably all_model_classes / pipeline_model_mapping / test flags upstream —
    # the repeated `_A` name means only the last assignment survives. TODO confirm.
    _A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
    _A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    _A : List[Any] = False
    _A : List[str] = False
    _A : Any = False
    _A : Optional[Any] = False
    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Set up the model tester and config tester used by the tests below."""
        __lowercase : str = TimmBackboneModelTester(self )
        __lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
    def lowerCAmelCase ( self : Any ) -> str:
        """Run the standard config round-trip and init checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def lowerCAmelCase ( self : str ) -> Tuple:
        """Compare a timm-loaded backbone against the equivalent transformers backbone."""
        __lowercase : Tuple = """resnet18"""
        __lowercase : Optional[int] = """microsoft/resnet-18"""
        # NOTE(review): `timm_model` / `transformers_model` below were presumably the
        # two AutoBackbone results — the bindings were lost in the mangling.
        __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
        __lowercase : Dict = AutoBackbone.from_pretrained(__a )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
        __lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
    def lowerCAmelCase ( self : List[Any] ) -> Any:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
    def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
    def lowerCAmelCase ( self : List[Any] ) -> str:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : Union[str, Any] ) -> int:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def lowerCAmelCase ( self : Dict ) -> int:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : List[str] ) -> List[Any]:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : List[Any] ) -> Tuple:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
    def lowerCAmelCase ( self : Dict ) -> Any:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
    def lowerCAmelCase ( self : str ) -> List[Any]:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""Safetensors is not supported by timm.""" )
    def lowerCAmelCase ( self : Any ) -> List[Any]:
        """Skipped: not applicable to TimmBackbone."""
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCAmelCase ( self : List[str] ) -> List[str]:
        """Skipped: pending a smaller common-test model."""
        pass
    def lowerCAmelCase ( self : Any ) -> List[str]:
        """Check that `forward` takes `pixel_values` as its first argument."""
        __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : Optional[Any] = model_class(__a )
            __lowercase : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase : List[str] = [*signature.parameters.keys()]
            __lowercase : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __a )
    def lowerCAmelCase ( self : Optional[Any] ) -> int:
        """Check that gradients flow back to retained hidden states (and attentions)."""
        __lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase : Optional[Any] = True
        __lowercase : Union[str, Any] = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        __lowercase : Union[str, Any] = self.all_model_classes[0]
        __lowercase : List[Any] = model_class(__a )
        model.to(__a )
        __lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
        __lowercase : Union[str, Any] = model(**__a )
        __lowercase : Optional[int] = outputs[0][-1]
        # Encoder-/Decoder-only models
        __lowercase : Any = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            __lowercase : Optional[int] = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=__a )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """Check feature-map/channel counts against out_indices, plus the
        out_indices=None default and fresh-weight initialization paths."""
        __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : List[str] = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : int = model(**__a )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            __lowercase : Any = copy.deepcopy(__a )
            __lowercase : Dict = None
            __lowercase : Tuple = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : Optional[int] = model(**__a )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            __lowercase : List[str] = copy.deepcopy(__a )
            __lowercase : Optional[Any] = False
            __lowercase : str = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : List[Any] = model(**__a )
| 306
| 1
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 306
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below.
# Fix: these three constants were all bound to the same mangled name
# `lowerCamelCase` (each rebinding shadowing the last) while the functions
# below read `logger`, `MAPPING` and `TOP_LEVEL_KEYS`; the intended names
# are restored here.
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to their HF Wav2Vec2Conformer
# counterparts; "*" is replaced with the encoder layer index at load time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}

# HF attributes that live at the top level of the model (no
# "wav2vec2_conformer." prefix is prepended for these).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy `value` into the HF module found by walking the dotted path `key`.

    Fixes over the mangled original: the def repeated the same parameter name
    five times (a SyntaxError), was named `snake_case_` while its caller invokes
    `set_recursively`, and every branch assigned a throwaway local instead of
    writing into the target parameter.

    Args:
        hf_pointer: root HF module to walk.
        key: dotted attribute path (e.g. "encoder.layers.0.ffn1.output_dense").
        value: tensor to copy in.
        full_name: original fairseq name, used only for logging/errors.
        weight_type: which attribute of the resolved module to set
            ("weight", "weight_g", "bias", ...) or None for the module itself.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    # Shape-check against the destination before overwriting anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every weight from a fairseq state dict into the HF Conformer model.

    Fixes over the mangled original: duplicate parameter names (SyntaxError),
    a def name (`snake_case_`) that did not match the `recursively_load_weights`
    call site, and locals (`is_used`, `mapped_key`, `weight_type`, ...) whose
    bindings had been collapsed into `__lowercase`.

    Args:
        fairseq_model: loaded fairseq model providing `state_dict()`.
        hf_model: target HF Wav2Vec2Conformer model.
        is_headless: True when converting a pretraining (no-CTC-head) checkpoint.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            # Feature-extractor conv weights have their own positional layout.
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # The encoder layer index sits just before the matched key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a single feature-extractor conv/layer-norm weight into the HF model.

    Fixes over the mangled original: duplicate parameter names (SyntaxError),
    a def name that did not match the `load_conv_layer` call site, and the
    actual `.data = value` assignments collapsed into dead locals.

    fairseq names look like "conv_layers.<layer_id>.<type_id>.<param>":
    type_id 0 is the conv itself; type_id 2 is the (layer) norm — present on
    every layer without group norm, or only on layer 0 with group norm.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq Wav2Vec2-Conformer checkpoint to the HF format.

    Fixes over the mangled original: duplicate parameter names (SyntaxError),
    a def name that did not match the `convert_wavaveca_conformer_checkpoint`
    call in the __main__ block, and config/tokenizer/model bindings that had
    all been collapsed into `__lowercase` locals.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional HF config.json to start from.
        dict_path: fairseq dictionary (fine-tuned/CTC models only).
        is_finetuned: False when converting a pretraining-only checkpoint.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # NOTE(review): the mangled source lost this binding's target; upstream
        # sets config.position_embeddings_type for rotary checkpoints — confirm.
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-norm feature extractors need an attention mask at inference.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = WavaVecaConformerForCTC(config)
    else:
        hf_wav2vec = WavaVecaConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Fix: the mangled original bound the parser and parsed args to
    # `lowerCamelCase` but then read `parser` / `args`, which were never
    # defined; the intended names are restored here.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 306
| 1
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase ( unittest.TestCase ):
    '''Tests for AutoTokenizer resolution, registration and remote-code loading.

    NOTE(review): this file is machine-mangled — locals are bound to
    `__lowercase` and then read under their original names, and many call
    arguments were replaced with an undefined `__a`. Comments describe the
    apparent upstream intent.
    '''
    def lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """Per-test setup; presumably initialized a counter/fixture upstream — the
        binding target was lost in the mangling."""
        __lowercase : Any = 0
    @slow
    def lowerCAmelCase ( self : int ) -> Dict:
        """Load every BERT and GPT-2 archive checkpoint and sanity-check the
        resulting tokenizer class and non-empty vocab."""
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            __lowercase : List[Any] = AutoTokenizer.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
            self.assertGreater(len(__a ) , 0 )
        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            __lowercase : Dict = AutoTokenizer.from_pretrained(__a )
            self.assertIsNotNone(__a )
            self.assertIsInstance(__a , (GPTaTokenizer, GPTaTokenizerFast) )
            self.assertGreater(len(__a ) , 0 )
    def lowerCAmelCase ( self : str ) -> Any:
        """Load a small BERT checkpoint and check class and vocab size (12).
        The `__a` argument is a mangling artifact — presumably a model id."""
        __lowercase : Dict = AutoTokenizer.from_pretrained(__a )
        self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def lowerCAmelCase ( self : List[Any] ) -> int:
        """Load a RoBERTa checkpoint and check class and vocab size (20)."""
        __lowercase : Any = AutoTokenizer.from_pretrained(__a )
        self.assertIsInstance(__a , (RobertaTokenizer, RobertaTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 20 )
    def lowerCAmelCase ( self : List[Any] ) -> Tuple:
        """Pass an explicit config to from_pretrained and check the tokenizer
        class is picked from the config even when tokenizer_type differs."""
        __lowercase : Tuple = AutoConfig.from_pretrained(__a )
        self.assertIsInstance(__a , __a )
        # Check that tokenizer_type ≠ model_type
        __lowercase : List[str] = AutoTokenizer.from_pretrained(__a , config=__a )
        self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(tokenizer.vocab_size , 12 )
    def lowerCAmelCase ( self : Optional[int] ) -> str:
        """Load tokenizers from local vocab files via the explicit
        `tokenizer_type` argument (slow/`use_fast` variant)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__a , """vocab.txt""" ) )
            __lowercase : Optional[int] = AutoTokenizer.from_pretrained(__a , tokenizer_type="""bert""" , use_fast=__a )
            self.assertIsInstance(__a , __a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__a , """vocab.json""" ) )
            shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__a , """merges.txt""" ) )
            __lowercase : Tuple = AutoTokenizer.from_pretrained(__a , tokenizer_type="""gpt2""" , use_fast=__a )
            self.assertIsInstance(__a , __a )
    @require_tokenizers
    def lowerCAmelCase ( self : List[Any] ) -> str:
        """Load tokenizers from local vocab files via the explicit
        `tokenizer_type` argument (default/fast variant)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__a , """vocab.txt""" ) )
            __lowercase : Any = AutoTokenizer.from_pretrained(__a , tokenizer_type="""bert""" )
            self.assertIsInstance(__a , __a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__a , """vocab.json""" ) )
            shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__a , """merges.txt""" ) )
            __lowercase : Tuple = AutoTokenizer.from_pretrained(__a , tokenizer_type="""gpt2""" )
            self.assertIsInstance(__a , __a )
    def lowerCAmelCase ( self : Tuple ) -> List[Any]:
        """An unknown `tokenizer_type` must raise (`__a` is a mangling artifact —
        presumably ValueError upstream)."""
        with pytest.raises(__a ):
            AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
    @require_tokenizers
    def lowerCAmelCase ( self : Tuple ) -> str:
        """Check do_lower_case and model_max_length survive loading for slow,
        fast and auto tokenizer classes."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            __lowercase : Any = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
            self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
            if isinstance(__a , __a ):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __a )
            else:
                self.assertEqual(tokenizer.do_lower_case , __a )
            self.assertEqual(tokenizer.model_max_length , 512 )
    @require_tokenizers
    def lowerCAmelCase ( self : int ) -> int:
        """A non-existent model identifier must raise with a helpful message."""
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                __a , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
                __lowercase : Optional[Any] = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
    def lowerCAmelCase ( self : str ) -> Optional[int]:
        """Every (slow, fast) class in TOKENIZER_MAPPING must be resolvable by
        name through tokenizer_class_from_name.

        NOTE(review): the mangling lost the `tokenizers`/`tokenizer_names`
        bindings — the first two assignments were presumably to those names.
        """
        __lowercase : int = TOKENIZER_MAPPING.values()
        __lowercase : List[str] = []
        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__ )
            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__ )
        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(__a )
    @require_tokenizers
    def lowerCAmelCase ( self : Optional[int] ) -> int:
        """`use_fast` selects between the slow and fast tokenizer classes."""
        self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__a ) , __a )
        self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __a )
    @require_tokenizers
    def lowerCAmelCase ( self : Any ) -> int:
        """With do_lower_case disabled, cased text outside the uncased vocab must
        tokenize to [UNK] for both distilbert and mpnet."""
        __lowercase : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__a )
        __lowercase : Optional[int] = """Hello, world. How are you?"""
        __lowercase : List[Any] = tokenizer.tokenize(__a )
        self.assertEqual("""[UNK]""" , tokens[0] )
        __lowercase : Tuple = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__a )
        __lowercase : str = tokenizer.tokenize(__a )
        self.assertEqual("""[UNK]""" , tokens[0] )
    @require_tokenizers
    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        """A fast-only repo with a model config must still load with the right
        class, limits and special tokens."""
        __lowercase : Any = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
        self.assertEqual(type(__a ) , __a )
        self.assertEqual(tokenizer.model_max_length , 512 )
        self.assertEqual(tokenizer.vocab_size , 30000 )
        self.assertEqual(tokenizer.unk_token , """[UNK]""" )
        self.assertEqual(tokenizer.padding_side , """right""" )
        self.assertEqual(tokenizer.truncation_side , """right""" )
    def lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
        """Round-trip: save_pretrained then from_pretrained must preserve the
        tokenizer class and vocab size."""
        __lowercase : Optional[int] = AutoTokenizer.from_pretrained(__a )
        self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(__a )
            __lowercase : Any = AutoTokenizer.from_pretrained(__a )
        self.assertIsInstance(__a , tokenizer.__class__ )
        self.assertEqual(tokenizera.vocab_size , 12 )
    def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
        """CTRL has no fast tokenizer, so AutoTokenizer must fall back to slow."""
        __lowercase : List[Any] = AutoTokenizer.from_pretrained("""ctrl""" )
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(__a , __a )
    def lowerCAmelCase ( self : int ) -> Dict:
        """Exercise get_tokenizer_config: a hub repo with a config, a repo
        without one, and a locally saved tokenizer."""
        __lowercase : Union[str, Any] = get_tokenizer_config("""bert-base-cased""" )
        __lowercase : List[str] = config.pop("""_commit_hash""" , __a )
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(__a , {"""do_lower_case""": False} )
        # This model does not have a tokenizer_config so we get back an empty dict.
        __lowercase : str = get_tokenizer_config(__a )
        self.assertDictEqual(__a , {} )
        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        __lowercase : str = AutoTokenizer.from_pretrained(__a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(__a )
            __lowercase : List[Any] = get_tokenizer_config(__a )
        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
    def lowerCAmelCase ( self : Dict ) -> int:
        """Register a custom slow tokenizer, check duplicate registration raises,
        and check save/reload round-trips; the finally block undoes the
        registration so other tests are unaffected."""
        try:
            AutoConfig.register("""custom""" , __a )
            AutoTokenizer.register(__a , slow_tokenizer_class=__a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__a ):
                AutoTokenizer.register(__a , slow_tokenizer_class=__a )
            __lowercase : List[Any] = CustomTokenizer.from_pretrained(__a )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(__a )
            __lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a )
            self.assertIsInstance(__a , __a )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    @require_tokenizers
    def lowerCAmelCase ( self : int ) -> int:
        """Register slow+fast custom tokenizers (in two steps and in one),
        check duplicate registration raises, and round-trip both variants
        through save/load; the finally block undoes the registration."""
        try:
            AutoConfig.register("""custom""" , __a )
            # Can register in two steps
            AutoTokenizer.register(__a , slow_tokenizer_class=__a )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
            AutoTokenizer.register(__a , fast_tokenizer_class=__a )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                __a , slow_tokenizer_class=__a , fast_tokenizer_class=__a )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__a ):
                AutoTokenizer.register(__a , fast_tokenizer_class=__a )
            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                __lowercase : int = BertTokenizerFast.from_pretrained(__a )
                bert_tokenizer.save_pretrained(__a )
                __lowercase : str = CustomTokenizerFast.from_pretrained(__a )
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(__a )
                __lowercase : List[str] = AutoTokenizer.from_pretrained(__a )
                self.assertIsInstance(__a , __a )
                __lowercase : int = AutoTokenizer.from_pretrained(__a , use_fast=__a )
                self.assertIsInstance(__a , __a )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
    def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
        """Remote-code tokenizers: loading must fail without trust_remote_code,
        succeed with it, and survive save/reload for both the fast and slow
        variants."""
        with self.assertRaises(__a ):
            __lowercase : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__a ):
            __lowercase : Tuple = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
        __lowercase : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
        self.assertTrue(tokenizer.special_attribute_present )
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(__a )
            __lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a , trust_remote_code=__a )
        self.assertTrue(reloaded_tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            # Test we can also load the slow version
            __lowercase : List[str] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a , use_fast=__a )
            self.assertTrue(tokenizer.special_attribute_present )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(__a )
                __lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a , trust_remote_code=__a , use_fast=__a )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(reloaded_tokenizer.special_attribute_present )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
    @require_tokenizers
    def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """Locally-registered tokenizer classes must take precedence over remote
        code unless ``trust_remote_code=True`` is passed explicitly.

        NOTE(review): several names used here (``__a`` as a base class and as
        registration arguments, ``NewTokenizer``) are defined earlier in the
        file and are not visible in this chunk -- verify against the full
        module before running.
        """
        # Local stand-ins for the remote tokenizer classes; the remote versions
        # set the special attribute to True, the local ones mark it False.
        class lowerCAmelCase ( __a ):
            '''Local slow tokenizer used for the registration-conflict test.'''
            _A : Tuple = False
        class lowerCAmelCase ( __a ):
            '''Local fast tokenizer paired with the slow class above.'''
            _A : str = NewTokenizer
            _A : Union[str, Any] = False
        try:
            AutoConfig.register("""custom""" , __a )
            AutoTokenizer.register(__a , slow_tokenizer_class=__a )
            AutoTokenizer.register(__a , fast_tokenizer_class=__a )
            # If remote code is not set, the default is to use local
            __lowercase : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            __lowercase : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__a )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local one.
            __lowercase : List[str] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertFalse(tokenizer.special_attribute_present )
            __lowercase : Optional[Any] = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a , use_fast=__a )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertFalse(tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub
            __lowercase : Any = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
            self.assertTrue(tokenizer.special_attribute_present )
            __lowercase : Dict = AutoTokenizer.from_pretrained(
                """hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a , use_fast=__a )
            self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
            self.assertTrue(tokenizer.special_attribute_present )
        finally:
            # Clean the global registries so later tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__a )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowercase : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__a , use_fast=__a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
    def lowerCAmelCase ( self : Dict ) -> str:
        """Loading a nonexistent repo id must fail with a helpful message.

        NOTE(review): ``__a`` (the expected exception type) is not defined in
        this method's scope -- presumably an environment/OS error class defined
        earlier in the file; confirm before running.
        """
        with self.assertRaisesRegex(
            __a , """bert-base is not a local folder and is not a valid model identifier""" ):
            __lowercase : List[Any] = AutoTokenizer.from_pretrained("""bert-base""" )
    def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        """Passing an invalid git ``revision`` must raise with a clear message.

        NOTE(review): both ``__a`` arguments (the expected exception type and the
        model identifier) are undefined in this scope -- they were lost in a
        rename; recover them from the original module before running.
        """
        with self.assertRaisesRegex(
            __a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            __lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a , revision="""aaaaaa""" )
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__lowercase : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 306
|
def snake_case_ ( string_a : str , string_b : str ) -> int:
    """Return the Hamming distance between two equal-length strings.

    Fixes two defects in the original: both parameters were named
    ``lowerCAmelCase_`` (a SyntaxError), and both loop variables were named
    ``chara`` so ``chara != chara`` was always False and the count was always 0.

    >>> snake_case_("karolin", "kathrin")
    3
    >>> snake_case_("abc", "abc")
    0

    :raises ValueError: if the strings have different lengths.
    """
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    # Count positions where the two strings disagree.
    return sum(char_a != char_b for char_a, char_b in zip(string_a , string_b ))
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 306
| 1
|
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class lowerCAmelCase :
    """Nearest-neighbour image resizer.

    Scales ``img`` to ``dst_width`` x ``dst_height`` by sampling, for each
    destination pixel, the nearest source pixel.

    Fixes over the original: ``__init__`` had two parameters both named ``__a``
    (a SyntaxError); the three methods were all named ``lowerCAmelCase`` so the
    later definitions shadowed the earlier ones while the body calls
    ``self.get_x``/``self.get_y`` and the demo calls ``n.process()``;
    ``np.uinta`` does not exist (``np.uint8`` intended); and a zero destination
    size slipped past the ``< 0`` check despite the "> 0" error message (and
    would later divide by zero).
    """

    def __init__( self , img : np.ndarray , dst_width : int , dst_height : int ) -> None:
        # Reject non-positive sizes up front; the ratios below divide by them.
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("""Destination width/height should be > 0""" )
        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height
        # Source-to-destination scale factors along each axis.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h
        # Destination buffer, initialised to white (255 on all 3 channels).
        self.output = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )

    def process( self ) -> None:
        """Fill ``self.output`` by nearest-neighbour sampling from ``self.img``."""
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]

    def get_x( self , x : int ) -> int:
        """Map destination column ``x`` to the nearest source column."""
        return int(self.ratio_x * x )

    def get_y( self , y : int ) -> int:
        """Map destination row ``y`` to the nearest source row."""
        return int(self.ratio_y * y )


# Backward-compatible alias: the __main__ demo below refers to the class by
# this name.
NearestNeighbour = lowerCAmelCase
if __name__ == "__main__":
    # Demo: resize a sample image to 800x600 with nearest-neighbour sampling
    # and display the result.
    # Fixes: the original bound the values to the name `lowerCamelCase` while
    # the following lines read `dst_w`, `dst_h`, `im` and `n`.
    dst_w, dst_h = 8_00, 6_00
    im = imread('''image_data/lena.jpg''', 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()
    imshow(
        f'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
    )
    waitKey(0)
    destroyAllWindows()
| 306
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple ( lowerCAmelCase_ ):
    """Return the argument unchanged if it is already iterable, otherwise a
    2-tuple repeating it.

    Fixes: the original returned an undefined name ``x``; the callers below
    (``to_atuple(vision_model.config.image_size)`` etc.) establish the intended
    name of this helper.
    """
    if isinstance(lowerCAmelCase_ , collections.abc.Iterable ):
        return lowerCAmelCase_
    return (lowerCAmelCase_, lowerCAmelCase_)


# Backward-compatible alias for the previous (obfuscated) name.
snake_case_ = to_atuple
@require_flax
class lowerCAmelCase :
    """Shared checks for FlaxVisionTextDualEncoderModel tests.

    Concrete test classes (ViT+BERT, CLIP+BERT below) provide the towers via
    the three hook methods; the ``check_*`` helpers exercise construction,
    save/load round-trips, attention shapes and PyTorch<->Flax equivalence.

    NOTE(review): many signatures below repeat the parameter name ``__a``
    (a duplicate-argument SyntaxError) and all methods share the name
    ``lowerCAmelCase`` while bodies call ``self.get_vision_text_model``,
    ``self.check_pt_flax_equivalence`` etc. -- the original names were lost in
    a rename and must be restored before this can run.
    """
    def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
        """Hook: build (vision_model, text_model) from configs; overridden by subclasses."""
        pass
    def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Hook: build configs plus random inputs; overridden by subclasses."""
        pass
    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """Hook: build a pretrained model and inputs; overridden by subclasses."""
        pass
    def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]:
        """Assert that the max absolute difference between two arrays is within tol."""
        __lowercase : List[str] = np.abs((a - b) ).max()
        self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." )
    def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
        """Build the dual encoder from the two sub-configs and check embedding shapes."""
        __lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : str = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
        """Build via from_vision_text_pretrained and check embedding shapes."""
        __lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
        __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
        """Round-trip save_pretrained/from_pretrained and compare first outputs."""
        __lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
        __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        __lowercase : int = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(__a )
            __lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
            __lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
            __lowercase : int = after_output[0]
            # Reloaded model must reproduce the original output closely.
            __lowercase : int = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(__a , 1E-3 )
    def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
        """Check attention tensors from both towers have the expected shapes."""
        __lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
        __lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : Union[str, Any] = model(
            input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
        __lowercase : Optional[int] = output.vision_model_output.attentions
        self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        __lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
        __lowercase : List[str] = to_atuple(vision_model.config.patch_size )
        __lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __lowercase : int = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        __lowercase : Dict = output.text_model_output.attentions
        self.assertEqual(len(__a ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
        """Compare PyTorch and Flax outputs, including cross-framework save/load."""
        pt_model.to(__a )
        pt_model.eval()
        # prepare inputs
        __lowercase : Union[str, Any] = inputs_dict
        __lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            __lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
        __lowercase : Tuple = fx_model(**__a ).to_tuple()
        self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(__a )
            __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
        __lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
        self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(__a )
            __lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
        pt_model_loaded.to(__a )
        pt_model_loaded.eval()
        with torch.no_grad():
            __lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
        self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
    def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
        """Build matched PT/Flax models (PT weights -> Flax) and check equivalence."""
        __lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : str = VisionTextDualEncoderModel(__a )
        __lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
        __lowercase : Any = fx_state
        self.check_pt_flax_equivalence(__a , __a , __a )
    def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
        """Build matched PT/Flax models (Flax weights -> PT) and check equivalence."""
        __lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
        __lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
        self.check_pt_flax_equivalence(__a , __a , __a )
    def lowerCAmelCase ( self : str ) -> Optional[Any]:
        """Test: construction from configs."""
        __lowercase : Optional[Any] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**__a )
    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """Test: construction from pretrained towers."""
        __lowercase : int = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**__a )
    def lowerCAmelCase ( self : List[Any] ) -> Dict:
        """Test: save/load round-trip."""
        __lowercase : List[str] = self.prepare_config_and_inputs()
        self.check_save_load(**__a )
    def lowerCAmelCase ( self : Any ) -> Dict:
        """Test: attention output shapes."""
        __lowercase : str = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**__a )
    @is_pt_flax_cross_test
    def lowerCAmelCase ( self : List[str] ) -> Tuple:
        """Test: PyTorch <-> Flax equivalence in both directions."""
        __lowercase : Optional[Any] = self.prepare_config_and_inputs()
        __lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
        __lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
        # remaining entries are the model inputs
        __lowercase : Dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(__a , __a , __a )
        self.check_equivalence_flax_to_pt(__a , __a , __a )
    @slow
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Test (slow): pretrained model save/load reproduces outputs."""
        __lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
        __lowercase : Dict = model_a(**__a )
        __lowercase : Any = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(__a )
            __lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
            __lowercase : Optional[int] = model_a(**__a )
            __lowercase : Tuple = after_outputs[0]
            __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(__a , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
    """Dual-encoder test case: Flax ViT vision tower + BERT text tower.

    Fixes over the original: the three methods were all named ``lowerCAmelCase``
    (shadowing each other) while the mixin calls them by the names restored
    here; the tuple unpackings bound every value to ``__lowercase`` while the
    returned dict read the proper names; ``vision_from_pt``/``text_from_pt``
    received an undefined ``__a`` (True intended — the tiny checkpoints are
    PyTorch weights).

    NOTE(review): the base class ``__a`` is undefined in this chunk — it should
    be the dual-encoder test mixin defined above; confirm in the full module.
    """

    def get_pretrained_model_and_inputs( self ):
        """Build a tiny pretrained ViT+BERT dual encoder plus matching random inputs."""
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs

    def get_vision_text_model( self , vision_config , text_config ):
        """Instantiate the two towers from their configs."""
        vision_model = FlaxViTModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model

    def prepare_config_and_inputs( self ):
        """Produce configs plus random inputs keyed as the mixin's check_* helpers expect."""
        vit_model_tester = FlaxViTModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
    """Dual-encoder test case: Flax CLIP vision tower + BERT text tower.

    Fixes over the original: method names restored to those the mixin calls;
    tuple unpackings bound to the names the returned dict reads; the undefined
    ``__a`` for ``vision_from_pt``/``text_from_pt`` replaced with True (the tiny
    checkpoints are PyTorch weights — hence @require_torch).

    NOTE(review): the base class ``__a`` is undefined in this chunk — it should
    be the dual-encoder test mixin defined above; confirm in the full module.
    """

    def get_pretrained_model_and_inputs( self ):
        """Build a tiny pretrained CLIP+BERT dual encoder plus matching random inputs."""
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs

    def get_vision_text_model( self , vision_config , text_config ):
        """Instantiate the two towers from their configs."""
        vision_model = FlaxCLIPVisionModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model

    def prepare_config_and_inputs( self ):
        """Produce configs plus random inputs keyed as the mixin's check_* helpers expect."""
        clip_model_tester = FlaxCLIPVisionModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test against the public clip-italian checkpoint."""

    @slow
    def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        """Run the clip-italian dual encoder on a COCO image and check logit
        shapes and values.

        Fixes over the original: results were bound to ``__lowercase`` while
        the code read ``inputs``/``outputs``/``processor``; the undefined
        ``__a`` stood in for the image and ``padding=True``.

        NOTE(review): for unittest discovery the method name should start with
        ``test_``; the original name is not recoverable from this chunk.
        """
        model = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=image , padding=True , return_tensors="""np""" )
        outputs = model(**inputs )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 ) )
| 306
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : List[str] = logging.get_logger(__name__)
class lowerCAmelCase ( BaseImageProcessor ):
    """Image processor: optional bicubic resize (default 256x256), center crop
    (default 224x224), rescale by 1/255 and normalisation with the ImageNet
    standard mean/std.

    Fixes over the original: every method repeated the parameter name ``__a``
    (a SyntaxError) and assigned to ``__lowercase`` while reading the proper
    attribute names; the undefined base class ``__a`` is restored from the
    ``BaseImageProcessor`` import at the top of the file.

    NOTE(review): the ``__init__``/``preprocess`` parameter names and their
    order were reconstructed from the visible types, defaults and attribute
    uses -- confirm against the original module before relying on positional
    calls.
    """

    _A : Optional[Any] = ['''pixel_values''']

    def __init__(
        self ,
        do_resize : bool = True ,
        size : Dict[str, int] = None ,
        resample : "PILImageResampling" = PIL.Image.BICUBIC ,
        do_center_crop : bool = True ,
        crop_size : Dict[str, int] = None ,
        rescale_factor : Union[int, float] = 1 / 255 ,
        do_rescale : bool = True ,
        do_normalize : bool = True ,
        image_mean : Optional[Union[float, List[float]]] = None ,
        image_std : Optional[Union[float, List[float]]] = None ,
        **kwargs ,
    ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"""height""": 256, """width""": 256}
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize( self , image : np.ndarray , size : Dict[str, int] , resample : "PILImageResampling" = PIL.Image.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        """Resize ``image`` to ``size`` (requires explicit height and width keys)."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        # Delegates to the module-level `resize` from image_transforms.
        return resize(
            image , size=(size["""height"""], size["""width"""]) , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        """Center-crop ``image`` to ``size`` (requires explicit height and width keys)."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}" )
        return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )

    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs ) -> np.ndarray:
        """Normalise ``image`` with the given per-channel mean and std."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess(
        self ,
        images : ImageInput ,
        do_resize : bool = None ,
        size : Dict[str, int] = None ,
        resample=None ,
        do_center_crop : bool = None ,
        crop_size : Dict[str, int] = None ,
        do_rescale : bool = None ,
        rescale_factor : float = None ,
        do_normalize : bool = None ,
        image_mean : Optional[Union[float, List[float]]] = None ,
        image_std : Optional[Union[float, List[float]]] = None ,
        return_tensors : Optional[Union[str, TensorType]] = None ,
        data_format : ChannelDimension = ChannelDimension.FIRST ,
        **kwargs ,
    ) -> PIL.Image.Image:
        """Apply the configured resize/crop/rescale/normalise pipeline to one or
        more images; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="""crop_size""" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"""pixel_values""": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 306
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 306
| 1
|
from math import factorial
def snake_case_ ( n : int , k : int ) -> int:
    """Return the number of k-combinations of n items (n choose k).

    Fixes: the original declared both parameters as ``lowerCAmelCase_``
    (a duplicate-argument SyntaxError) while the body read ``n`` and ``k``.

    :raises ValueError: if k is negative or greater than n.
    """
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("""Please enter positive integers for n and k where n >= k""" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))


# Name used by the __main__ demo below.
combinations = snake_case_
if __name__ == "__main__":
    # Demo: a few classic "n choose k" examples.
    # NOTE(review): the f-strings call `combinations`, but the function above is
    # bound to the name `snake_case_` -- confirm the intended name in the
    # original module.
    print(
        '''The number of five-card hands possible from a standard''',
        f'''fifty-two card deck is: {combinations(52, 5)}\n''',
    )
    print(
        '''If a class of 40 students must be arranged into groups of''',
        f'''4 for group projects, there are {combinations(40, 4)} ways''',
        '''to arrange them.\n''',
    )
    print(
        '''If 10 teams are competing in a Formula One race, there''',
        f'''are {combinations(10, 3)} ways that first, second and''',
        '''third place can be awarded.''',
    )
| 306
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Regex matching any character that is NOT a letter, digit or underscore;
# used below to split file contents into tokens.
# Fixes: all three values were bound to the name `lowerCamelCase` while the
# functions below read NON_ALPHA, MIN_NUM_TOKENS and NUM_PERM.
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 2_56
# Preserve the final binding of the previous (obfuscated) name for any code
# outside this chunk that may still read it.
lowerCamelCase = NUM_PERM
def get_min_hash ( tokens ):
    """Compute a MinHash signature for a list of tokens, or return None when
    there are fewer than MIN_NUM_TOKENS tokens (too short to deduplicate).

    Fixes: the original passed the token list itself as ``num_perm`` (instead
    of the NUM_PERM constant) and assigned the hash to ``__lowercase`` while
    updating a name ``min_hash`` that was never bound; the function name is
    restored from its caller in ``_compute_min_hash`` below.
    """
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    # Hash the *set* of tokens: duplicates carry no extra signal.
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens ( code : str ):
    """Split *code* on non-alphanumeric characters and return the set of
    non-empty tokens.

    NOTE(review): the original name ``snake_case_`` is re-bound by later
    definitions and therefore unreachable; the name used here follows the
    surrounding helpers -- confirm against the original module.
    """
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class lowerCAmelCase :
    """MinHash/LSH index that groups near-duplicate documents into clusters.

    Fixes over the original: ``__init__`` bound every value to ``__lowercase``
    so none of the attributes the other methods read (``_index``,
    ``_duplicate_clusters``, ...) were ever set; the methods were all named
    ``lowerCAmelCase`` -- the names restored here are taken from their call
    sites (``di.add(...)`` and ``self.get_duplicate_clusters()``); the
    keyword-only parameter name comes from the visible caller
    ``DuplicationIndex(duplication_jaccard_threshold=...)``.
    """

    def __init__( self , *,
        duplication_jaccard_threshold : float = 0.85 , ) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        # Maps a cluster's "base" key to the set of its near-duplicates.
        self._duplicate_clusters = defaultdict(set )

    def add( self , code_key : "Tuple" , min_hash : "MinHash" ) -> None:
        """Insert a document and record it in an existing cluster if it is a
        near-duplicate of an already-indexed document."""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}" )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                # No existing cluster matched: start one under the first match.
                self._duplicate_clusters[close_duplicates[0]].add(code_key )

    def get_duplicate_clusters( self ) -> "List[List[Dict]]":
        """Return the clusters as lists of dicts with base_index/repo_name/path."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters

    def save( self , filepath ) -> None:
        """Serialise the duplicate clusters to *filepath* as JSON.

        NOTE(review): this method's original name is not visible in the chunk;
        confirm against the original module.
        """
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash(element):
    """Pool worker: (index, row) -> ((index, repo_name, path), MinHash), or None.

    Returns None (implicitly) when the file is too short to fingerprint.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield (key, MinHash) pairs computed in parallel over dataset rows.

    Rows too short to fingerprint are dropped (worker returns None).
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find near-duplicate clusters over the whole dataset.

    Returns a list of clusters, each a list of
    {"base_index", "repo_name", "path"} dicts.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code_a: str, code_b: str) -> float:
    """Jaccard similarity of the token sets of two code strings.

    NOTE: like the upstream version, raises ZeroDivisionError when both
    strings contain no tokens at all.
    """
    tokens_a = get_tokens(code_a)
    tokens_b = get_tokens(code_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)
# Module-level handle assigned by find_extremes() so forked pool workers can
# read the dataset without pickling it per task.
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce one duplicate cluster to its representative "extremes".

    Each kept element carries a `copies` count of the near-duplicates it
    absorbed. Reads file contents through the module-level `_shared_dataset`.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                # Close enough to an existing representative: fold it in.
                element2["copies"] += 1
                break
        else:
            # No representative is close: this element becomes one.
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Run _find_cluster_extremes_shared over every cluster with a process pool."""
    global _shared_dataset
    # Shared with the forked workers through the module global, not pickled.
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Deduplicate `dataset`, keeping one representative per near-duplicate cluster.

    Returns (filtered_dataset, duplicate_clusters). Cluster elements are
    annotated in place with `is_extreme` and, for kept elements, `copies`.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # Drop every duplicate that is not a cluster representative.
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 306
| 1
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class lowerCAmelCase ( __a ):
'''simple docstring'''
def __lt__( self : List[Any] , __a : Optional[Any] ) -> Any:
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : Any , __a : int ) -> Union[str, Any]:
"""simple docstring"""
return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    """Sort `collection` in place via patience sorting and return it."""
    stacks: list[Stack] = []
    # sort into stacks: each element goes on the leftmost stack whose top
    # is >= the element, or starts a new stack.
    for element in collection:
        new_stack = Stack([element])
        i = bisect_left(stacks, new_stack)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stack)
    # use a heap-based merge to merge stacks efficiently; each stack is
    # descending, so reversed() yields it ascending for merge().
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    # Read a comma-separated list of integers and print it sorted.
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(patience_sort(unsorted))
| 306
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    """Bundles the TVLT image processor and audio feature extractor into one
    processor whose __call__ produces a single merged feature dict."""

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Process images and/or audio; at least one of them must be given.

        Returns a dict merging the feature extractor's output (if audio was
        given) with the image processor's outputs for `images` and
        `images_mixed`.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        """Union (order-preserving, deduplicated) of both components' input names."""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 306
| 1
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Restored the canonical constant names: the class below reads
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, not the obfuscated bindings.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a one-token-per-line vocabulary file into an OrderedDict token -> id."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        # Strip only the trailing newline; other whitespace may be meaningful.
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first wordpiece splitter over a fixed vocabulary."""

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split `token` into the longest vocabulary pieces, left to right.

        Characters starting no vocabulary piece map to the unk token; inputs
        longer than max_input_chars_per_word collapse to a single unk token.
        """
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Shrink the window from the right until a vocab entry matches.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # Nothing in the vocab starts at `start`: emit unk, advance one char.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba word segmentation followed by greedy
    longest-match-first wordpiece lookup against the vocabulary.

    Restored from the obfuscated block, in which every hook method was named
    `lowerCAmelCase` (so only the last one survived) and attribute
    assignments all targeted `__lowercase`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Plain space / newline stand in for the placeholder tokens at runtime.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then wordpiece-split each segment."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Drop negative ids and pad/eos/bos before delegating to the base decoder."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary back to disk, restoring the placeholder tokens."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Undo the space/newline substitution done in __init__.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prepend BOS; for pairs, produce BOS A BOS B."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Mark the prepended BOS token(s) with 1, sequence tokens with 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 306
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCAmelCase :
    '''Builds an EsmConfig plus random input tensors for the ESM unit tests.

    NOTE(review): this block is machine-obfuscated — every assignment targets
    `__lowercase` while later lines read the original names (self.batch_size,
    input_ids, ...), and all parameters are named `__a`. It cannot run as
    written; restore the original `self.<name> = <param>` assignments and
    method names from the upstream test file.
    '''

    def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]:
        """Record the parent test case and the model hyperparameters."""
        # NOTE(review): the right-hand names below are the original parameter
        # names; the obfuscated signature renamed every parameter to `__a`.
        __lowercase : Tuple = parent
        __lowercase : int = batch_size
        __lowercase : Any = seq_length
        __lowercase : str = is_training
        __lowercase : str = use_input_mask
        __lowercase : Optional[int] = use_token_type_ids
        __lowercase : List[Any] = use_labels
        __lowercase : Optional[Any] = vocab_size
        __lowercase : int = hidden_size
        __lowercase : List[Any] = num_hidden_layers
        __lowercase : Dict = num_attention_heads
        __lowercase : Any = intermediate_size
        __lowercase : Dict = hidden_act
        __lowercase : Union[str, Any] = hidden_dropout_prob
        __lowercase : List[Any] = attention_probs_dropout_prob
        __lowercase : List[str] = max_position_embeddings
        __lowercase : Union[str, Any] = type_vocab_size
        __lowercase : Dict = type_sequence_label_size
        __lowercase : Union[str, Any] = initializer_range
        __lowercase : List[Any] = num_labels
        __lowercase : str = num_choices
        __lowercase : Tuple = scope

    def lowerCAmelCase ( self : Tuple ) -> List[Any]:
        """Build random input_ids / mask / labels and a config for one test run."""
        __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowercase : int = None
        if self.use_input_mask:
            __lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        __lowercase : str = None
        __lowercase : Optional[Any] = None
        __lowercase : Tuple = None
        if self.use_labels:
            __lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        # NOTE(review): every method here is named `lowerCAmelCase`, so calls
        # like `self.get_config()` below cannot resolve as written.
        __lowercase : int = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase ( self : Dict ) -> Optional[int]:
        """Return an EsmConfig assembled from the stored hyperparameters."""
        return EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]:
        """Run EsmModel and check last_hidden_state / pooler_output shapes."""
        __lowercase : Optional[int] = EsmModel(config=__a )
        model.to(__a )
        model.eval()
        __lowercase : str = model(__a , attention_mask=__a )
        __lowercase : List[Any] = model(__a )
        __lowercase : Optional[int] = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]:
        """Run EsmForMaskedLM and check the logits shape."""
        __lowercase : List[str] = EsmForMaskedLM(config=__a )
        model.to(__a )
        model.eval()
        __lowercase : int = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]:
        """Run EsmForTokenClassification and check the logits shape."""
        __lowercase : Tuple = self.num_labels
        __lowercase : Any = EsmForTokenClassification(config=__a )
        model.to(__a )
        model.eval()
        __lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """Repackage config and inputs as (config, inputs_dict) for the common tests."""
        __lowercase : Any = self.prepare_config_and_inputs()
        # NOTE(review): annotating a parenthesized tuple target is a
        # SyntaxError in Python; the obfuscation broke this unpacking of
        # config_and_inputs.
        (
            (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) ,
        ) : List[str] = config_and_inputs
        __lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
    '''Common + pipeline test harness for the ESM model family.

    NOTE(review): machine-obfuscated — the mixin base classes were replaced
    by the undefined name `__a`, every class attribute is `_A` (each
    assignment overwrites the previous one), and all test methods are named
    `lowerCAmelCase`, so unittest would discover none of them. Restore the
    upstream attribute and `test_*` method names before relying on this.
    '''

    _A : Optional[Any] = False
    _A : Any = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _A : Optional[Any] = ()
    _A : List[Any] = (
        {
            """feature-extraction""": EsmModel,
            """fill-mask""": EsmForMaskedLM,
            """text-classification""": EsmForSequenceClassification,
            """token-classification""": EsmForTokenClassification,
            """zero-shot""": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _A : Optional[Any] = True

    def lowerCAmelCase ( self : Tuple ) -> str:
        """Create the model-tester and config-tester helpers."""
        # NOTE(review): `EsmModelTester` is not defined in this file as
        # written (the tester class above is also named `lowerCAmelCase`).
        __lowercase : Optional[int] = EsmModelTester(self )
        __lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )

    def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
        """Run the shared config round-trip tests."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase ( self : int ) -> Optional[Any]:
        """Check the base model's output shapes."""
        __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Check the model under each position-embedding type."""
        __lowercase : Any = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # NOTE(review): presumably this should set
            # config_and_inputs[0].position_embedding_type = type — the
            # obfuscation reduced it to a throwaway local; confirm upstream.
            __lowercase : Union[str, Any] = type
            self.model_tester.create_and_check_model(*__a )

    def lowerCAmelCase ( self : int ) -> Any:
        """Check the masked-LM head."""
        __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a )

    def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
        """Check the token-classification head."""
        __lowercase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a )

    @slow
    def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
        """Smoke-test loading the first pretrained ESM checkpoint."""
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase : List[str] = EsmModel.from_pretrained(__a )
            self.assertIsNotNone(__a )

    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """Position ids derived from input_ids must respect the padding index."""
        __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
        __lowercase : List[str] = EsmEmbeddings(config=__a )
        __lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        __lowercase : int = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        __lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )

    def lowerCAmelCase ( self : Tuple ) -> Any:
        """Position ids derived from inputs_embeds must be sequential after padding_idx."""
        __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
        __lowercase : Optional[Any] = EsmEmbeddings(config=__a )
        __lowercase : Optional[int] = torch.empty(2 , 4 , 30 )
        __lowercase : Tuple = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        __lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
        __lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )

    @unittest.skip("""Esm does not support embedding resizing""" )
    def lowerCAmelCase ( self : Tuple ) -> int:
        """Skipped: embedding resizing is unsupported for ESM."""
        pass

    @unittest.skip("""Esm does not support embedding resizing""" )
    def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
        """Skipped: embedding resizing is unsupported for ESM."""
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
        """Skipped pending a smaller common-test model."""
        pass
@require_torch
class lowerCAmelCase ( __a ):
    '''Integration tests against the facebook/esm2_t6_8M_UR50D checkpoint.

    NOTE(review): machine-obfuscated — the base class is the undefined name
    `__a` (upstream: TestCasePlus) and results are assigned to `__lowercase`
    but read back as `model` / `output` / `vocab_size`; the methods cannot
    run as written.
    '''

    @slow
    def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        """Masked-LM forward pass reproduces the reference logits slice."""
        with torch.no_grad():
            __lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            __lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            __lowercase : List[str] = model(__a )[0]
            __lowercase : Union[str, Any] = 33
            __lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , __a )
            __lowercase : List[Any] = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )

    @slow
    def lowerCAmelCase ( self : str ) -> Union[str, Any]:
        """Base-model forward pass reproduces the reference hidden-state slice."""
        with torch.no_grad():
            __lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            __lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            __lowercase : Any = model(__a )[0]
            # compare the actual values for a slice.
            __lowercase : int = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 306
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Restored the canonical constant names: the tokenizer class below reads
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, not the obfuscated bindings.
VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    """SentencePiece character tokenizer for SpeechT5.

    Restored from the obfuscated block: attribute assignments targeted
    `__lowercase` and all hook methods were named `lowerCAmelCase`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and rebuild
        # from vocab_file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into a string, passing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Append EOS; for pairs, A B EOS."""
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Mark the appended EOS with 1, sequence tokens with 0."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 306
|
def is_pentagonal(n: int) -> bool:
    """True iff n is a pentagonal number P(k) = k(3k - 1)/2 for some integer k >= 1.

    Inverts the formula: k = (1 + sqrt(1 + 24n)) / 6 must be a whole number.
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5000) -> int:
    """Project Euler 44: find pentagonal numbers P(i) <= P(j) whose sum and
    difference are both pentagonal, and return that (minimal-by-scan) difference.

    Returns -1 if no such pair exists among the first `limit` - 1 pentagonal
    numbers.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
    # Print the minimised pentagonal difference (Project Euler problem 44).
    print(f'''{solution() = }''')
| 306
| 1
|
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
# Non-default values for (nearly) every common PretrainedConfig attribute,
# used by the config tests to check that serialization round-trips them.
# NOTE(review): restored a meaningful name; the obfuscated `lowerCamelCase`
# binding was never read back under that name.
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
@is_staging_test
class lowerCAmelCase(unittest.TestCase):
    """Staging-endpoint integration tests for pushing configurations to the Hub.

    Requires network access to the Hugging Face staging Hub and a valid test
    token (`TOKEN` / `USER` from `transformers.testing_utils`).
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once for every test in this class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: a repo may not exist if its test failed early,
        # hence the swallowed HTTPError per repo.
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """Round-trip a config through `push_to_hub` and `save_pretrained(push_to_hub=True)`."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        """Same round-trip as `test_push_to_hub`, but targeting an organization namespace."""
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        """A config registered for auto class should round-trip with `trust_remote_code=True`."""
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class lowerCAmelCase(unittest.TestCase):
    """Unit tests for `PretrainedConfig` utility behavior (caching, versioning, updates)."""

    def test_config_update_from_string(self):
        """`update_from_string` must round-trip int/float/bool/str attribute types."""
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        """Every common config attribute must appear in `config_common_kwargs` with a non-default value."""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        """Offline (HTTP 500) lookups must fall back to the local cache."""
        # A mock response for an HTTP head request to emulate server down.
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # Loading a config directly from a resolved URL is legacy behavior.
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        """Version-suffixed config files should be selected based on the running Transformers version."""
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        """Version-suffixed config files on the Hub should be selected by monkey-patched version."""
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
| 306
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase(SchedulerCommonTest):
    """Scheduler tests for `DPMSolverSDEScheduler` (requires the `torchsde` package)."""

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, with overrides from `kwargs` applied."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Run the full denoising loop and compare against per-backend reference statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Reference values differ slightly per backend (SDE noise sampling).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Full loop with `prediction_type="v_prediction"`."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed on the target device up front."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop using the Karras sigma schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 306
| 1
|
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

# File names and download locations for the CPM SentencePiece vocabulary.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class lowerCAmelCase(PreTrainedTokenizer):
    """CPM tokenizer: XLNet-style SentencePiece tokenization with jieba pre-segmentation.

    Chinese text is pre-segmented with jieba and whitespace/newlines are mapped to
    the placeholder characters "\u2582"/"\u2583" before SentencePiece encoding;
    `_decode` reverses that mapping.
    """

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # The mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        """Size of the SentencePiece vocabulary (excluding added tokens)."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before tokenization."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, splitting digit+comma pieces like XLNet."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join SentencePiece tokens back into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLNet format: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for sequence A, 1 for sequence B, 2 for the trailing <cls>."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        """Decode, then undo the jieba whitespace placeholders ("\u2582" -> space, "\u2583" -> newline)."""
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 306
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
# Shared TensorRT logger used by builder/parser below.
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

# Quiet absl's INFO-level chatter (absl is pulled in via TensorRT tooling).
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)
# Command-line interface for the TensorRT QA evaluation script.
parser = argparse.ArgumentParser()

# Required parameters
parser.add_argument(
    "--onnx_model_path",
    default=None,
    type=str,
    required=True,
    help="Path to ONNX model: ",
)

parser.add_argument(
    "--output_dir",
    default=None,
    type=str,
    required=True,
    help="The output directory where the model checkpoints and predictions will be written.",
)

# Other parameters

parser.add_argument(
    "--tokenizer_name",
    default="",
    type=str,
    required=True,
    help="Pretrained tokenizer name or path if not the same as model_name",
)

parser.add_argument(
    "--version_2_with_negative",
    action="store_true",
    help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
    "--null_score_diff_threshold",
    type=float,
    default=0.0,
    help="If null_score - best_non_null is greater than the threshold predict null.",
)

parser.add_argument(
    "--max_seq_length",
    default=384,
    type=int,
    help=(
        "The maximum total input sequence length after WordPiece tokenization. Sequences "
        "longer than this will be truncated, and sequences shorter than this will be padded."
    ),
)
parser.add_argument(
    "--doc_stride",
    default=128,
    type=int,
    help="When splitting up a long document into chunks, how much stride to take between chunks.",
)

parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")

parser.add_argument(
    "--n_best_size",
    default=20,
    type=int,
    help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
    "--max_answer_length",
    default=30,
    type=int,
    help=(
        "The maximum length of an answer that can be generated. This is needed because the start "
        "and end predictions are not conditioned on one another."
    ),
)

parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")

parser.add_argument(
    "--dataset_name",
    type=str,
    default=None,
    required=True,
    help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--dataset_config_name",
    type=str,
    default=None,
    help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
    "--fp16",
    action="store_true",
    help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
    "--int8",
    action="store_true",
    help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

# All three optimization profiles use this fixed (batch, sequence) shape.
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def snake_case_(inputs, context, d_inputs, h_outputa, h_outputa_end, d_outputa, d_outputa_end, stream):
    """Run one TensorRT inference pass for a QA batch.

    Args:
        inputs: tokenizer output dict with "input_ids", "attention_mask", "token_type_ids".
        context: TensorRT execution context.
        d_inputs: list of three device buffers for the inputs, in the order above.
        h_outputa / h_outputa_end: pinned host buffers for start / end logits.
        d_outputa / d_outputa_end: device buffers for start / end logits.
        stream: CUDA stream used for async copies and execution.

    Returns:
        ((start_logits, end_logits), inference_time_in_seconds)
    """
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int64)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int64)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int64)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_outputa), int(d_outputa_end)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_outputa, d_outputa, stream)
    cuda.memcpy_dtoh_async(h_outputa_end, d_outputa_end, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_outputa, h_outputa_end)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)

# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)

# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.

column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def snake_case_ ( examples ):
    """Tokenize validation examples into overflow-aware QA features.

    Fix applied: the previous version declared its parameter under a mangled name and
    bound every intermediate result to throwaway ``__lowercase`` locals, so later
    reads of ``sample_mapping``, ``sequence_ids`` etc. raised NameError and the
    ``example_id``/``offset_mapping`` bookkeeping was never stored. Restored per the
    upstream ``run_qa`` example.

    Relies on surrounding-scope objects: ``tokenizer``, ``pad_on_right``,
    ``question_column_name``, ``context_column_name``, ``max_seq_length`` and ``args``.
    """
    # Some of the questions have lots of whitespace on the left, which is not useful
    # and will make the truncation of the context fail (the tokenized question will
    # take a lot of space). So we remove that left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize with truncation and maybe padding, but keep the overflows using a
    # stride: one example may yield several features when its context is long, each
    # feature's context overlapping a bit with the previous one.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Map each feature back to the example it came from.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation we need to map predictions back to context substrings, so we
    # keep the example_id and mask offsets that are not part of the context.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Sequence ids distinguish the question tokens from the context tokens.
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans; this is the owning example's index.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Null out offsets that are not part of the context so a token position can
        # easily be classified as context / non-context later.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
# Keep the raw validation split around for metric post-processing.
lowerCamelCase : Tuple = raw_datasets['''validation''']
# Validation Feature Creation
# NOTE(review): `eval_examples` / `prepare_validation_features` are not bound under
# those names above — identifiers look mangled; verify against the original script.
lowerCamelCase : Optional[int] = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)
# Features are already padded to max_length, so plain collation suffices.
lowerCamelCase : Union[str, Any] = default_data_collator
# Post-processing-only columns must not be fed to the model.
lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowerCamelCase : List[str] = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def snake_case_ ( examples , features , predictions , stage="eval" ):
    """Convert start/end logits into text answers and pair them with references.

    Fix applied: the previous version declared all four parameters under the same
    name ``lowerCAmelCase_`` (a SyntaxError — duplicate argument names) and bound
    the formatted predictions/references to throwaway locals. Restored per the
    upstream ``run_qa`` example.

    Relies on surrounding-scope objects: ``args``, ``answer_column_name``,
    ``postprocess_qa_predictions`` and ``EvalPrediction``.
    """
    # Post-processing: match the start and end logits to answer spans in the
    # original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the squad / squad_v2 metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
# squad_v2 scores no-answer predictions; plain squad does not.
lowerCamelCase : Dict = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
# Deserialize the serialized TensorRT engine and open an execution context for it.
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    # Bind every input to the fixed INPUT_SHAPE so output shapes can be inferred.
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified
    def snake_case_ ( lowerCAmelCase_ : str ):
        # Size in bytes of one engine binding: element count (volume) times element size.
        return trt.volume(engine.get_binding_shape(lowerCAmelCase_ ) ) * engine.get_binding_dtype(lowerCAmelCase_ ).itemsize
    # Allocate device memory for inputs and outputs.
    # NOTE(review): `binding_nbytes` is not bound under that name above (the helper is
    # `snake_case_`), and several names read below (`h_outputa`, `model_infer`,
    # `eval_dataloader`, `outputs`, `total_time`, `niter`, `all_preds`, ...) are never
    # defined here — the assignments all bind `lowerCamelCase`. Identifiers look
    # mangled; verify against the original script before running.
    lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    lowerCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    lowerCamelCase : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    lowerCamelCase : Dict = cuda.mem_alloc(h_outputa.nbytes)
    lowerCamelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    lowerCamelCase : Optional[int] = cuda.Stream()
    # Evaluation
    logger.info('''***** Running Evaluation *****''')
    logger.info(f''' Num examples = {len(eval_dataset)}''')
    logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    lowerCamelCase : int = 0.0
    lowerCamelCase : List[str] = 0
    lowerCamelCase : List[str] = timeit.default_timer()
    lowerCamelCase : List[Any] = None
    for step, batch in enumerate(eval_dataloader):
        lowerCamelCase ,lowerCamelCase : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1
        lowerCamelCase ,lowerCamelCase : Union[str, Any] = outputs
        lowerCamelCase : Optional[Any] = torch.tensor(start_logits)
        lowerCamelCase : List[str] = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        lowerCamelCase : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        lowerCamelCase : Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
    # Drop the padding rows that gathering across processes may have added.
    if all_preds is not None:
        lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset))
    lowerCamelCase : Dict = timeit.default_timer() - start_time
    logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
    logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
    logger.info('''Total Number of Inference = %d''', niter)
    # Convert the gathered logits to text predictions and score with the metric.
    lowerCamelCase : str = post_processing_function(eval_examples, eval_dataset, all_preds)
    lowerCamelCase : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
| 306
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: maps submodule name -> public names it provides.
# Fixes applied: the original bound the structure and its optional extensions to
# throwaway variables, then referenced the undefined name `_import_structure`
# (NameError at import time) and never installed the _LazyModule into sys.modules.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision extras missing: simply omit the image-processing symbols.
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: omit the modeling symbols.
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers get the eager imports.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 306
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# Map of canonical checkpoint names to their hosted config files.
lowerCamelCase : str = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class lowerCAmelCase ( PretrainedConfig ):
    """Configuration class for NLLB-MoE models (model type ``nllb-moe``).

    Fixes applied: the base class was the undefined name ``__a`` (restored to the
    imported ``PretrainedConfig``); every ``__init__`` parameter shared the name
    ``__a`` (a SyntaxError — duplicate argument names) and every field assignment
    bound a throwaway local, so nothing was stored on the instance. Parameter and
    attribute names restored from the upstream transformers config; defaults kept
    identical to the original signature.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Aliases so generic code can read num_attention_heads / hidden_size.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        """Store transformer, MoE-routing and generation hyper-parameters."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts routing hyper-parameters.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 306
| 1
|
def snake_case_ ( num: int ):
    """Sieve of Eratosthenes: return all primes in ``[2, num]`` in ascending order.

    Fixes applied: the parameter and locals were mangled (the body read undefined
    ``num``/``primes``/``p`` and the composite-marking assignment was lost), and the
    inner ``range`` used ``num`` as its step instead of ``p``, so multiples of each
    prime beyond ``p*p`` were never struck out.

    Raises:
        ValueError: if ``num`` is not a positive integer.
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p*p, as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fixes applied: the previous code bound the parsed integer to a throwaway name
    # and then called the undefined `prime_sieve_eratosthenes` with the undefined
    # `user_num` — both raised NameError. The sieve above is defined as `snake_case_`.
    user_num = int(input('''Enter a positive integer: ''').strip())
    print(snake_case_(user_num))
| 306
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: maps submodule name -> public names it provides.
# Fixes applied: the original bound the structure and its optional extensions to
# throwaway variables, then referenced the undefined name `_import_structure`
# (NameError at import time) and never installed the _LazyModule into sys.modules.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Vision extras missing: simply omit the image-processing symbols.
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: omit the modeling symbols.
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers get the eager imports.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 306
| 1
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def snake_case_ ( sequences , padding_value , padding_side , sequence_length ):
    """Pad (and truncate) a batch of variable-length sequences to ``sequence_length``.

    Fixes applied: the previous version declared all four parameters under the same
    name ``lowerCAmelCase_`` (a SyntaxError — duplicate argument names) and the loop
    bound its results to throwaway locals, so ``out_tensor`` was never filled and
    the function returned pure padding. Restored per the upstream LUKE example.

    Args:
        sequences: list of sequences (lists of scalars, or of 2-tuples/pairs when
            ``padding_value`` is a tuple).
        padding_value: scalar fill value, or a 2-tuple for pairwise entries.
        padding_side: ``"right"`` left-aligns each sequence; anything else
            right-aligns it.
        sequence_length: target length; longer sequences are truncated.

    Returns:
        A plain nested ``list`` (via ``ndarray.tolist()``).
    """
    if isinstance(padding_value, tuple):
        # Each entry is a pair, so the output gains a trailing axis of size 2.
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        trimmed = tensor[:sequence_length]
        if not len(trimmed):
            # Nothing to copy; the row stays all-padding.
            continue
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(trimmed), :2] = trimmed
            else:
                out_tensor[i, : len(trimmed)] = trimmed
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(trimmed) :, :2] = trimmed
            else:
                out_tensor[i, -len(trimmed) :] = trimmed
    return out_tensor.tolist()
def snake_case_ ( char ):
    """Return True if *char* (a single character) is punctuation.

    Fix applied: the previous version bound ``ord(char)`` and the Unicode category
    to throwaway locals, so the reads of ``cp`` and ``cat`` raised NameError.

    The four ASCII punctuation/symbol blocks are treated as punctuation directly
    (BERT-style behaviour); everything else falls back to the Unicode "P*" categories.
    """
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
@dataclass
class lowerCAmelCase ( DataCollatorMixin ):
    """Collates LUKE entity-aware token-classification features into torch tensors.

    Fixes applied: the base class was the undefined name ``__a`` (restored to the
    imported ``DataCollatorMixin``); all dataclass fields were declared under the
    same name ``_A`` so only the last survived and ``self.tokenizer`` etc. did not
    exist; the collate method bound its results to throwaway locals and was not
    named ``torch_call``, so the mixin's dispatch could not find it. Restored per
    the upstream LUKE example.
    """

    # Tokenizer used to pad the token-level fields.
    tokenizer: PreTrainedTokenizerBase
    # Padding strategy forwarded to `tokenizer.pad`.
    padding: Union[bool, str, PaddingStrategy] = True
    # Optional max length / multiple-of constraints for padding.
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    # Value used to pad label ids so loss functions can ignore those positions.
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        """Pad labels, ner_tags and entity spans; return a batch of int64 tensors."""
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Tensor conversion would fail while label lists still have ragged lengths.
            return_tensors="pt" if labels is None else None,
        )
        if labels is None:
            return batch
        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]
        ner_tags = [feature["ner_tags"] for feature in features]
        # NOTE(review): the padding helper is defined earlier in this file under a
        # mangled name (`snake_case_`); confirm which symbol is actually in scope.
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
| 306
|
from __future__ import annotations
def snake_case_ ( n: int ) -> list[int]:
    """Return the prime factorization of ``n`` in non-decreasing order (trial division).

    Fixes applied: the previous version declared its parameter under a mangled name
    and bound ``i`` and ``factors`` to throwaway locals, so the loop read undefined
    names and appended the wrong value.
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            # i divides n: record the factor and strip it out.
            n //= i
            factors.append(i)
    if n > 1:
        # Whatever remains above sqrt of the original value is itself prime.
        factors.append(n)
    return factors
if __name__ == "__main__":
    import doctest
    # Run the module's doctests when executed as a script.
    doctest.testmod()
| 306
| 1
|
from manim import *
class lowerCAmelCase ( __a ):
    """Manim scene: animates loading an empty model skeleton into (CPU) memory.

    NOTE(review): the method below reads `__a` at many call sites although it
    declares no such parameter — these were presumably direction constants
    (RIGHT, DOWN, ...) and object references before identifier mangling, so the
    scene cannot run as written; verify against the original animation source.
    """
    def lowerCAmelCase ( self : Tuple ) -> int:
        """Build the CPU/GPU/Model layout, then play the loading animation."""
        # Base memory-cell rectangles (outer frame and slightly smaller fill).
        __lowercase : List[str] = Rectangle(height=0.5 , width=0.5 )
        __lowercase : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # Two columns of six cells form the CPU block.
        __lowercase : Dict = [mem.copy() for i in range(6 )]
        __lowercase : List[str] = [mem.copy() for i in range(6 )]
        __lowercase : str = VGroup(*__a ).arrange(__a , buff=0 )
        __lowercase : List[Any] = VGroup(*__a ).arrange(__a , buff=0 )
        __lowercase : str = VGroup(__a , __a ).arrange(__a , buff=0 )
        __lowercase : Any = Text("""CPU""" , font_size=24 )
        __lowercase : Union[str, Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(__a )
        # Single-cell GPU block, nudged left of its default position.
        __lowercase : List[Any] = [mem.copy() for i in range(1 )]
        __lowercase : Union[str, Any] = VGroup(*__a ).arrange(__a , buff=0 )
        __lowercase : str = Text("""GPU""" , font_size=24 )
        __lowercase : List[Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        gpu.align_to(__a , __a )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(__a )
        # Six-cell model block on the right.
        __lowercase : Optional[Any] = [mem.copy() for i in range(6 )]
        __lowercase : Union[str, Any] = VGroup(*__a ).arrange(__a , buff=0 )
        __lowercase : Dict = Text("""Model""" , font_size=24 )
        __lowercase : Optional[Any] = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(__a , run_time=1 ) , Create(__a , run_time=1 ) , Create(__a , run_time=1 ) , )
        # Caption and key explaining the empty-model markers.
        __lowercase : Union[str, Any] = MarkupText(
            F"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM." , font_size=24 , )
        __lowercase : Any = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __lowercase : List[str] = MarkupText(
            F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(__a , run_time=2.5 ) , Write(__a ) , Write(__a ) )
        self.add(__a )
        # Animate one highlighted target cell per model cell moving into the CPU.
        __lowercase : Optional[int] = []
        __lowercase : List[str] = []
        __lowercase : Dict = []
        for i, rect in enumerate(__a ):
            __lowercase : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
            cpu_target.move_to(__a )
            cpu_target.generate_target()
            __lowercase : Union[str, Any] = 0.46 / 4
            __lowercase : Optional[int] = 0.46 / 3
            if i == 0:
                # First target anchors to the bottom-left corner of the CPU column.
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=__a , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=__a , buff=0.0 )
            cpu_targs.append(__a )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__a ) )
            second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
        self.play(*__a )
        self.play(*__a )
        self.wait()
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    """Integration test for the TF CamemBERT model (downloads a real checkpoint).

    NOTE(review): `model(__a )` and `assertEqual(output.shape , __a )` read `__a`,
    which is not defined in the method — presumably the mangled names of the input
    ids tensor and expected shape; also `tf.intaa` / `tf.floataa` are not real tf
    dtypes. Verify against the original test before running.
    """
    @slow
    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """Check output shape and a 3x3 hidden-state slice against reference values."""
        __lowercase : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        __lowercase : List[str] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
        __lowercase : Optional[Any] = model(__a )["""last_hidden_state"""]
        __lowercase : Any = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , __a )
        # compare the actual values for a slice.
        __lowercase : Dict = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 306
| 1
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase ( __a ):
    """Mel-spectrogram audio diffusion pipeline: a UNet denoiser, a DDIM/DDPM
    scheduler, a Mel image<->audio converter, and an optional VQ-VAE for
    latent-space diffusion.

    NOTE(review): the duplicate `__a` parameter names in `__init__` and `__call__`
    are a SyntaxError, and many expressions read `__a` where a concrete class or
    argument name once stood — the identifiers appear mangled; verify against the
    upstream AudioDiffusionPipeline before relying on this code.
    """
    # The VQ-VAE component is optional.
    _A : Dict = ['''vqvae''']
    def __init__( self : List[str] , __a : AutoencoderKL , __a : UNetaDConditionModel , __a : Mel , __a : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
        """Register the unet, scheduler, mel and vqvae sub-modules."""
        super().__init__()
        self.register_modules(unet=__a , scheduler=__a , mel=__a , vqvae=__a )
    def lowerCAmelCase ( self : List[str] ) -> int:
        """Default inference steps: 50 for DDIM-style schedulers, else 1000."""
        return 50 if isinstance(self.scheduler , __a ) else 1000
    @torch.no_grad()
    def __call__( self : Union[str, Any] , __a : int = 1 , __a : str = None , __a : np.ndarray = None , __a : int = 0 , __a : int = 0 , __a : int = None , __a : torch.Generator = None , __a : float = 0 , __a : float = 0 , __a : torch.Generator = None , __a : float = 0 , __a : torch.Tensor = None , __a : torch.Tensor = None , __a : Any=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Denoise from random noise — optionally conditioned on an input audio
        slice — and return spectrogram images together with the decoded audio."""
        __lowercase : Optional[int] = steps or self.get_default_steps()
        self.scheduler.set_timesteps(__a )
        __lowercase : Any = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            __lowercase : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            # Fresh Gaussian noise shaped like one UNet input per batch element.
            __lowercase : Dict = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=__a , device=self.device , )
        __lowercase : List[Any] = noise
        __lowercase : List[str] = None
        # Optionally encode an existing audio slice as the starting point / mask.
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(__a , __a )
            __lowercase : Dict = self.mel.audio_slice_to_image(__a )
            __lowercase : List[str] = np.frombuffer(input_image.tobytes() , dtype="""uint8""" ).reshape(
                (input_image.height, input_image.width) )
            # Map uint8 pixels to [-1, 1] as expected by the diffusion model.
            __lowercase : Optional[Any] = (input_image / 255) * 2 - 1
            __lowercase : int = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                __lowercase : List[str] = self.vqvae.encode(torch.unsqueeze(__a , 0 ) ).latent_dist.sample(
                    generator=__a )[0]
                __lowercase : Any = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                __lowercase : List[str] = self.scheduler.add_noise(__a , __a , self.scheduler.timesteps[start_step - 1] )
            # Convert seconds to spectrogram pixels for the mask boundaries.
            __lowercase : Dict = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            __lowercase : Union[str, Any] = int(mask_start_secs * pixels_per_second )
            __lowercase : Optional[int] = int(mask_end_secs * pixels_per_second )
            __lowercase : Optional[Any] = self.scheduler.add_noise(__a , __a , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        # Denoising loop.
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , __a ):
                __lowercase : List[Any] = self.unet(__a , __a , __a )["""sample"""]
            else:
                __lowercase : Any = self.unet(__a , __a )["""sample"""]
            if isinstance(self.scheduler , __a ):
                __lowercase : List[str] = self.scheduler.step(
                    model_output=__a , timestep=__a , sample=__a , eta=__a , generator=__a , )["""prev_sample"""]
            else:
                __lowercase : str = self.scheduler.step(
                    model_output=__a , timestep=__a , sample=__a , generator=__a , )["""prev_sample"""]
            # Re-impose the masked (conditioning) region after each step.
            if mask is not None:
                if mask_start > 0:
                    __lowercase : Tuple = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    __lowercase : Tuple = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            __lowercase : int = 1 / self.vqvae.config.scaling_factor * images
            __lowercase : List[str] = self.vqvae.decode(__a )["""sample"""]
        # Convert tensors to uint8 PIL images, then decode each image to audio.
        __lowercase : Union[str, Any] = (images / 2 + 0.5).clamp(0 , 1 )
        __lowercase : Any = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        __lowercase : Any = (images * 255).round().astype("""uint8""" )
        __lowercase : str = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(__a , mode="""RGB""" ).convert("""L""" ) for _ in images) )
        __lowercase : str = [self.mel.image_to_audio(__a ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(__a )[:, np.newaxis, :] ) , **ImagePipelineOutput(__a ) )
    @torch.no_grad()
    def lowerCAmelCase ( self : List[Any] , __a : List[Image.Image] , __a : int = 50 ) -> np.ndarray:
        """DDIM inversion: map spectrogram images back to the noise that generates
        them (requires a DDIM-style scheduler)."""
        assert isinstance(self.scheduler , __a )
        self.scheduler.set_timesteps(__a )
        __lowercase : List[Any] = np.array(
            [np.frombuffer(image.tobytes() , dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] )
        # Map uint8 pixels to [-1, 1].
        __lowercase : List[Any] = (sample / 255) * 2 - 1
        __lowercase : Any = torch.Tensor(__a ).to(self.device )
        # Walk the timesteps in reverse, undoing each DDIM update.
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            __lowercase : List[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            __lowercase : List[str] = self.scheduler.alphas_cumprod[t]
            __lowercase : int = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            __lowercase : Tuple = 1 - alpha_prod_t
            __lowercase : Union[str, Any] = self.unet(__a , __a )["""sample"""]
            __lowercase : int = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            __lowercase : Optional[Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            __lowercase : Dict = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def lowerCAmelCase ( __a : torch.Tensor , __a : torch.Tensor , __a : float ) -> torch.Tensor:
        """Spherical linear interpolation (slerp) between two flattened tensors."""
        __lowercase : Optional[Any] = acos(torch.dot(torch.flatten(__a ) , torch.flatten(__a ) ) / torch.norm(__a ) / torch.norm(__a ) )
        return sin((1 - alpha) * theta ) * xa / sin(__a ) + sin(alpha * theta ) * xa / sin(__a )
| 306
|
def snake_case_ ( a: str , b: str ) -> bool:
    """Return True if ``a`` can be abbreviated to ``b`` by (1) upper-casing some of
    its lowercase letters and (2) deleting the remaining lowercase letters.

    Fixes applied: the previous version declared both parameters under the same name
    ``lowerCAmelCase_`` (a SyntaxError — duplicate argument names) and bound ``n``,
    ``m`` and every ``dp`` table update to throwaway locals, so the DP never ran.
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: can the first i chars of `a` be abbreviated to the first j of `b`?
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Upper-case a[i] to match b[j]...
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # ...or delete a[i] (only legal for lowercase letters).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    import doctest
    # Run the module's doctests when executed as a script.
    doctest.testmod()
| 1
|
from __future__ import annotations
def snake_case_ ( a: int , b: int ) -> tuple[int, int]:
    """Extended Euclid: return ``(x, y)`` with ``a*x + b*y == gcd(a, b)``.

    Fixes applied: the previous version declared both parameters under the same name
    ``lowerCAmelCase_`` (a SyntaxError — duplicate argument names), recursed through
    the undefined name ``extended_euclid``, and bound the recursive result and the
    quotient to throwaway locals.
    """
    if b == 0:
        return (1, 0)
    (x, y) = snake_case_(b, a % b)
    k = a // b
    return (y, x - k * y)
def snake_case_ ( na: int , ra: int , nb: int , rb: int ) -> int:
    """Chinese Remainder Theorem: smallest non-negative ``n`` with ``n % na == ra``
    and ``n % nb == rb`` (requires ``gcd(na, nb) == 1``).

    Fixes applied: the previous version declared all four parameters under the same
    name ``lowerCAmelCase_`` (a SyntaxError), called the undefined name
    ``extended_euclid`` (defined in this file only under a mangled name), and bound
    its intermediates to throwaway locals. The Bezout helper is nested here so the
    function is self-contained.
    """
    def _extended_euclid(a: int, b: int) -> tuple[int, int]:
        # Bezout coefficients: a*x + b*y == gcd(a, b).
        if b == 0:
            return (1, 0)
        (x, y) = _extended_euclid(b, a % b)
        return (y, x - (a // b) * y)

    (x, y) = _extended_euclid(na, nb)
    m = na * nb
    n = rb * x * na + ra * y * nb
    # Normalize into [0, m).
    return (n % m + m) % m
def snake_case_ ( a: int , n: int ) -> int:
    """Multiplicative inverse of ``a`` modulo ``n`` (requires ``gcd(a, n) == 1``).

    Fixes applied: the previous version declared both parameters under the same name
    ``lowerCAmelCase_`` (a SyntaxError), called the undefined name
    ``extended_euclid`` and bound the result to throwaway locals. The Bezout helper
    is nested here so the function is self-contained.
    """
    def _extended_euclid(a: int, b: int) -> tuple[int, int]:
        # Bezout coefficients: a*x + b*y == gcd(a, b).
        if b == 0:
            return (1, 0)
        (x, y) = _extended_euclid(b, a % b)
        return (y, x - (a // b) * y)

    (b, x) = _extended_euclid(a, n)
    if b < 0:
        # Normalize the coefficient into [0, n).
        b = (b % n + n) % n
    return b
def snake_case_ ( na: int , ra: int , nb: int , rb: int ) -> int:
    """Chinese Remainder Theorem via modular inverses: smallest non-negative ``n``
    with ``n % na == ra`` and ``n % nb == rb`` (requires ``gcd(na, nb) == 1``).

    Fixes applied: the previous version declared all four parameters under the same
    name ``lowerCAmelCase_`` (a SyntaxError) and called the undefined name
    ``invert_modulo``. Helpers are nested here so the function is self-contained.
    """
    def _extended_euclid(a: int, b: int) -> tuple[int, int]:
        # Bezout coefficients: a*x + b*y == gcd(a, b).
        if b == 0:
            return (1, 0)
        (x, y) = _extended_euclid(b, a % b)
        return (y, x - (a // b) * y)

    def _invert_modulo(a: int, n: int) -> int:
        # Multiplicative inverse of a mod n, normalized into [0, n).
        (b, x) = _extended_euclid(a, n)
        if b < 0:
            b = (b % n + n) % n
        return b

    x, y = _invert_modulo(na, nb), _invert_modulo(nb, na)
    m = na * nb
    n = rb * x * na + ra * y * nb
    return (n % m + m) % m
if __name__ == "__main__":
    from doctest import testmod
    # `name=` only labels the doctest report; it does not select which function runs.
    # NOTE(review): the functions above are all defined as `snake_case_`, so these
    # labels no longer correspond to real symbols in this module.
    testmod(name='''chinese_remainder_theorem''', verbose=True)
    testmod(name='''chinese_remainder_theorem2''', verbose=True)
    testmod(name='''invert_modulo''', verbose=True)
    testmod(name='''extended_euclid''', verbose=True)
| 306
|
from scipy.stats import spearmanr
import datasets
lowerCamelCase : List[str] = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCamelCase : List[str] = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCamelCase : Union[str, Any] = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
    """Spearman rank-order correlation metric backed by `scipy.stats.spearmanr`."""

    def lowerCAmelCase ( self : Tuple ) -> Any:
        """Declare the metric's inputs (two float sequences) and reference docs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""float""" ),
                    """references""": datasets.Value("""float""" ),
                } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )

    def lowerCAmelCase ( self : List[Any] , predictions : Any , references : Any , return_pvalue : bool = False ) -> dict:
        """Compute the Spearman correlation of `predictions` vs `references`.

        Returns {"spearmanr": coefficient} and, when `return_pvalue` is True,
        additionally {"spearmanr_pvalue": p-value}.

        Fix: the original declared all three parameters under the single name
        `__a` (a SyntaxError) and read `return_pvalue`/`results`, which were
        never bound; real names are restored from the body's usage.
        """
        results = spearmanr(predictions , references )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 306
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : str = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
    """Configuration for NLLB-MoE (Mixture-of-Experts translation) models.

    Holds the encoder/decoder transformer hyper-parameters together with the
    MoE routing options (number of experts, capacity, router dtype and loss
    coefficients).  Mirrors `transformers.NllbMoeConfig`.
    """

    _A : int = '''nllb-moe'''
    _A : List[str] = ['''past_key_values''']
    _A : Optional[Any] = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        """Build the config.

        Fix: the original declared every parameter under the single name
        `__a` (a SyntaxError) and assigned each value to the throwaway local
        `__lowercase` instead of the corresponding `self` attribute.
        Parameter names are restored from the assignment order, matching the
        upstream NllbMoeConfig signature.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 306
|
from __future__ import annotations
def snake_case_ ( pattern : str , text : str ) -> bool:
    """Return True iff `pattern` occurs in `text`, via Knuth–Morris–Pratt.

    Fix: the original declared both parameters under the single name
    `lowerCAmelCase_` (a SyntaxError) and read `failure`/`pattern`/`text`,
    which were never bound.
    """
    # 1) Build the failure (longest proper prefix-suffix) table.
    # NOTE(review): `get_failure_array` is not bound under that name in this
    # module (the helper below is named `snake_case_`) — confirm the binding.
    failure = get_failure_array(pattern )
    # 2) Step through text searching for pattern
    i , j = 0, 0  # index into text, pattern
    while i < len(text ):
        if pattern[j] == text[i]:
            if j == (len(pattern ) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def snake_case_ ( pattern : str ) -> list:
    """Return the KMP failure table: failure[k] is the length of the longest
    proper prefix of pattern[: k + 1] that is also a suffix of it.

    Fix: the original body read `pattern`, `failure`, `i` and `j`, none of
    which were bound (assignments all targeted the throwaway `__lowercase`).
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern ):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # Fall back to the longest shorter border and retry this j.
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i )
    return failure
if __name__ == "__main__":
    # NOTE(review): every assignment below rebinds the single name
    # `lowerCamelCase`, while the asserts reference `pattern`, `texta`,
    # `text`, `kmp` and `get_failure_array` — none of these names are
    # defined in this module, so this block raises NameError as written.
    # TODO confirm the intended variable and function names.
    # Test 1)
    lowerCamelCase : Dict = '''abc1abc12'''
    lowerCamelCase : Union[str, Any] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    lowerCamelCase : Any = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, texta) and not kmp(pattern, texta)
    # Test 2)
    lowerCamelCase : List[Any] = '''ABABX'''
    lowerCamelCase : List[Any] = '''ABABZABABYABABX'''
    assert kmp(pattern, text)
    # Test 3)
    lowerCamelCase : int = '''AAAB'''
    lowerCamelCase : Optional[int] = '''ABAAAAAB'''
    assert kmp(pattern, text)
    # Test 4)
    lowerCamelCase : Optional[Any] = '''abcdabcy'''
    lowerCamelCase : Any = '''abcxabcdabxabcdabcdabcy'''
    assert kmp(pattern, text)
    # Test 5)
    lowerCamelCase : Dict = '''aabaabaaa'''
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 306
| 1
|
def snake_case_ ( lowerCAmelCase_ : int ) -> bool:
    """Return True iff the integer is "bouncy": its digits are neither
    monotonically increasing nor monotonically decreasing.

    Raises ValueError for non-integer input.  Fixes: the original called
    isinstance(x, x) — a TypeError for any non-type argument — and read
    `str_n`/`sorted_str_n`, which were never bound.
    """
    if not isinstance(lowerCAmelCase_ , int ):
        raise ValueError("""check_bouncy() accepts only integer arguments""" )
    str_n = str(lowerCAmelCase_ )
    sorted_str_n = """""".join(sorted(str_n ) )
    # Not sorted ascending (not increasing) and not sorted descending.
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case_ ( lowerCAmelCase_ : float = 99 ) -> int:
    """Project Euler 112: least number at which the proportion of bouncy
    numbers reaches the given percentage (exclusive bounds 0 < p < 100).

    Fix: the original tested check_bouncy(<percent>) instead of the running
    counter `num`, and read `percent`/`bouncy_num`/`num`, which were never
    bound (assignments targeted the throwaway `__lowercase`).
    """
    if not 0 < lowerCAmelCase_ < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""" )
    bouncy_num = 0
    num = 1
    while True:
        # NOTE(review): `check_bouncy` is not bound under that name in this
        # module (the predicate above is named `snake_case_`) — confirm.
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= lowerCAmelCase_:
            return num
        num += 1
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    # NOTE(review): `solution` is not bound in this module (the function
    # above is named `snake_case_`) — this print raises NameError as written.
    print(f'''{solution(99)}''')
| 306
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase ( __a ):
    """CLIP-style image processor: shortest-edge resize, centre crop,
    rescale to [0, 1], per-channel normalization with the OpenAI CLIP
    statistics, and optional conversion of inputs to RGB.

    NOTE(review): every method below declares all of its parameters under
    the single name `__a` (a SyntaxError) while the bodies read the original
    keyword names (`size`, `crop_size`, `do_resize`, ...), which are never
    bound — the real parameter names need to be restored throughout.
    """

    # Only model input produced by `preprocess`.
    _A : List[str] = ['''pixel_values''']

    def __init__( self : Any , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : str , ) -> None:
        """Store the default preprocessing configuration (resize/crop sizes,
        resample filter, rescale factor, normalization statistics, RGB flag)."""
        super().__init__(**__a )
        __lowercase : Dict = size if size is not None else {"""shortest_edge""": 224}
        __lowercase : Union[str, Any] = get_size_dict(__a , default_to_square=__a )
        __lowercase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        __lowercase : Any = get_size_dict(__a , default_to_square=__a , param_name="""crop_size""" )
        __lowercase : Optional[int] = do_resize
        __lowercase : Union[str, Any] = size
        __lowercase : List[Any] = resample
        __lowercase : Any = do_center_crop
        __lowercase : Dict = crop_size
        __lowercase : int = do_rescale
        __lowercase : Tuple = rescale_factor
        __lowercase : List[Any] = do_normalize
        __lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        __lowercase : int = image_std if image_std is not None else OPENAI_CLIP_STD
        __lowercase : Union[str, Any] = do_convert_rgb

    def lowerCAmelCase ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> np.ndarray:
        """Resize so that the image's shortest edge equals
        size["shortest_edge"], preserving the aspect ratio."""
        __lowercase : Dict = get_size_dict(__a , default_to_square=__a )
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        __lowercase : str = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a )
        return resize(__a , size=__a , resample=__a , data_format=__a , **__a )

    def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray:
        """Centre-crop the image to (size["height"], size["width"])."""
        __lowercase : Tuple = get_size_dict(__a )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a )

    def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> List[str]:
        """Multiply pixel values by the given scale (typically 1/255)."""
        return rescale(__a , scale=__a , data_format=__a , **__a )

    def lowerCAmelCase ( self : Optional[int] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray:
        """Normalize the image with the given per-channel mean and std."""
        return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )

    def lowerCAmelCase ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
        """Run the configured pipeline (convert-RGB, resize, crop, rescale,
        normalize, channel reordering) over one image or a batch; returns a
        BatchFeature with key "pixel_values"."""
        # Per-call overrides fall back to the defaults stored in __init__.
        __lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize
        __lowercase : Dict = size if size is not None else self.size
        __lowercase : Tuple = get_size_dict(__a , param_name="""size""" , default_to_square=__a )
        __lowercase : int = resample if resample is not None else self.resample
        __lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
        __lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size
        __lowercase : List[str] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a )
        __lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        __lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
        __lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize
        __lowercase : Tuple = image_mean if image_mean is not None else self.image_mean
        __lowercase : str = image_std if image_std is not None else self.image_std
        __lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        __lowercase : Union[str, Any] = make_list_of_images(__a )
        if not valid_images(__a ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # Validate that every enabled step has the arguments it needs.
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            __lowercase : Union[str, Any] = [convert_to_rgb(__a ) for image in images]
        # All transformations expect numpy arrays.
        __lowercase : Any = [to_numpy_array(__a ) for image in images]
        if do_resize:
            __lowercase : str = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
        if do_center_crop:
            __lowercase : str = [self.center_crop(image=__a , size=__a ) for image in images]
        if do_rescale:
            __lowercase : Dict = [self.rescale(image=__a , scale=__a ) for image in images]
        if do_normalize:
            __lowercase : Optional[Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
        __lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images]
        __lowercase : Optional[int] = {"""pixel_values""": images}
        return BatchFeature(data=__a , tensor_type=__a )
| 306
| 1
|
def snake_case_ ( lowerCAmelCase_ : int = 10**9 ) -> int:
    """Project Euler 94: sum of the perimeters (up to the given limit) of
    almost-equilateral triangles with integral side lengths and area.

    The qualifying perimeters (16, 50, 196, 722, ...) follow a Pell-like
    recurrence, so the loop runs only O(log(limit)) iterations.

    Fix: the original bound five distinct locals to the single throwaway
    name `__lowercase` and then read `perimeter`, `prev_value`, `value`,
    `i` and `max_perimeter`, none of which existed.
    """
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= lowerCAmelCase_:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (n, n, n + 1) and (n, n, n - 1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the function
    # above is named `snake_case_`) — this raises NameError as written.
    print(f'''{solution() = }''')
| 306
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def snake_case_ ( s : str , old : str , new : str , occurrence : int ) -> str:
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`.

    Fix: the original declared all four parameters under the single name
    `lowerCAmelCase_` (a SyntaxError) and read `s`/`new`, which were never
    bound; real names are restored from the body's usage.
    """
    parts = s.rsplit(old , occurrence )
    return new.join(parts )
def snake_case_ ( lowerCAmelCase_ : dict ):
    """Sum every parameter tensor in a state dict, skipping any key that
    contains "encoder.embeddings".

    Fix: the original read `state_dict`, which was never bound (the sole
    parameter is `lowerCAmelCase_`).
    """
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in lowerCAmelCase_.items() )
def snake_case_ ( lowerCAmelCase_ : dict ) -> dict:
    """Rename the keys of a DALL-E codebook state dict to the FLAVA layout
    and cast every tensor to float32.

    Rewrites applied per key, in order:
      * ``group_N.``  -> ``group_N.group.``
      * ``res_path.`` -> ``res_path.path.``
      * trailing ``.w`` / ``.b`` -> ``.weight`` / ``.bias``

    Fix: the original assigned every renamed key to the throwaway local
    `__lowercase`, so no rename took effect and the `upgrade` dict it
    returned was never populated.
    """
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in lowerCAmelCase_.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F"{group_key}." , F"{group_key}.group." )
        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )
        # Because the key *ends* with ".w"/".b", replacing the suffix is
        # equivalent to the original rreplace(key, ".w", ".weight", 1).
        if key.endswith(""".w""" ):
            key = key[:-2] + """.weight"""
        if key.endswith(""".b""" ):
            key = key[:-2] + """.bias"""
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=True ):
    """Convert a DALL-E codebook checkpoint into a FlavaImageCodebook.

    NOTE(review): all four parameters share one name (a SyntaxError) —
    judging by the __main__ block they are the checkpoint path, the dump
    folder, an optional config path and a save flag.  The body also reads
    `ckpt`, `encoder`, `config_path`, `hf_model`, `hf_state_dict`,
    `save_checkpoint`, `count_parameters` and `upgrade_state_dict`, none of
    which are bound under those names here — the intended flow appears to
    be: load the dall_e Encoder weights (file or URL), rename keys via the
    upgrade helper, load them into a FlavaImageCodebook, sanity-check that
    the parameter sums match, then save or return the state dict.  TODO
    confirm and restore the real names.
    """
    from dall_e import Encoder
    __lowercase : Any = Encoder()
    # Load from a local file when it exists, otherwise from a URL.
    if os.path.exists(lowerCAmelCase_ ):
        __lowercase : List[Any] = torch.load(lowerCAmelCase_ )
    else:
        __lowercase : List[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ )
    if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
        __lowercase : int = ckpt.state_dict()
    encoder.load_state_dict(lowerCAmelCase_ )
    if config_path is not None:
        __lowercase : Optional[int] = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase_ )
    else:
        __lowercase : List[str] = FlavaImageCodebookConfig()
    __lowercase : Optional[Any] = FlavaImageCodebook(lowerCAmelCase_ ).eval()
    __lowercase : List[Any] = encoder.state_dict()
    __lowercase : Union[str, Any] = upgrade_state_dict(lowerCAmelCase_ )
    hf_model.load_state_dict(lowerCAmelCase_ )
    __lowercase : Dict = hf_model.state_dict()
    __lowercase : Tuple = count_parameters(lowerCAmelCase_ )
    __lowercase : Tuple = count_parameters(lowerCAmelCase_ )
    # Sanity check: total parameter mass must survive the conversion.
    assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 )
    if save_checkpoint:
        hf_model.save_pretrained(lowerCAmelCase_ )
    else:
        return hf_state_dict
if __name__ == "__main__":
    # NOTE(review): the parser is bound to `lowerCamelCase`, yet the lines
    # below read `parser`, `args` and `convert_dalle_checkpoint` — none of
    # these names are defined in this module as written.
    lowerCamelCase : Dict = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    lowerCamelCase : Union[str, Any] = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 306
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCamelCase : str = logging.get_logger(__name__)
def snake_case_ ( a , b ):
    """Pairwise squared Euclidean distances between the rows of `a` and `b`.

    Uses the expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2 so the whole
    computation is one matmul plus broadcasting; returns a matrix of shape
    (len(a), len(b)).

    Fix: the original declared both parameters under the single name
    `lowerCAmelCase_` (a SyntaxError) and read `aa`/`ab`/`ba`, which were
    never bound.
    """
    b = b.T
    aa = np.sum(np.square(a ) , axis=1 )
    ba = np.sum(np.square(b ) , axis=0 )
    ab = np.matmul(a , b )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def snake_case_ ( x , clusters ):
    """Map each RGB pixel of `x` to the index of its nearest cluster centre.

    Fix: the original declared both parameters under the single name
    `lowerCAmelCase_` (a SyntaxError) and read `x`, which was never bound.
    The squared-distance computation is inlined here because the helper
    above is not bound under its original name in this module.
    """
    pixels = x.reshape(-1 , 3 )
    # Squared Euclidean distance of every pixel to every cluster centre.
    ct = clusters.T
    aa = np.sum(np.square(pixels ) , axis=1 )
    ba = np.sum(np.square(ct ) , axis=0 )
    ab = np.matmul(pixels , ct )
    d = aa[:, None] - 2 * ab + ba[None, :]
    return np.argmin(d , axis=1 )
class lowerCAmelCase ( __a ):
    """ImageGPT-style image processor: resize, normalize to [-1, 1], and
    optionally colour-quantize pixels to the indices of learned cluster
    centres, producing "input_ids" rather than pixel values.

    NOTE(review): the methods below declare all of their parameters under
    the single name `__a` (a SyntaxError) while the bodies read the original
    keyword names (`size`, `clusters`, ...), which are never bound — the
    real parameter names need to be restored throughout.
    """

    # Only model input produced by `preprocess` (renamed to "input_ids"
    # when colour quantization is enabled).
    _A : List[str] = ['''pixel_values''']

    def __init__( self : str , __a : Optional[Union[List[List[int]], np.ndarray]] = None , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : bool = True , __a : bool = True , **__a : Dict , ) -> None:
        """Store the default configuration: cluster centres, resize size and
        filter, and the normalize / colour-quantize flags."""
        super().__init__(**__a )
        __lowercase : List[str] = size if size is not None else {"""height""": 256, """width""": 256}
        __lowercase : Optional[Any] = get_size_dict(__a )
        __lowercase : List[str] = np.array(__a ) if clusters is not None else None
        __lowercase : Any = do_resize
        __lowercase : Union[str, Any] = size
        __lowercase : List[str] = resample
        __lowercase : Optional[int] = do_normalize
        __lowercase : Optional[Any] = do_color_quantize

    def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BILINEAR , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray:
        """Resize the image to exactly (size["height"], size["width"])."""
        __lowercase : Optional[int] = get_size_dict(__a )
        if "height" not in size or "width" not in size:
            raise ValueError(F"Size dictionary must contain both height and width keys. Got {size.keys()}" )
        return resize(
            __a , size=(size["""height"""], size["""width"""]) , resample=__a , data_format=__a , **__a )

    def lowerCAmelCase ( self : Dict , __a : np.ndarray , __a : Optional[Union[str, ChannelDimension]] = None , ) -> np.ndarray:
        """Map pixel values from [0, 255] to [-1, 1] (scale by 1/127.5, then
        subtract 1), as expected by ImageGPT."""
        __lowercase : str = rescale(image=__a , scale=1 / 127.5 , data_format=__a )
        __lowercase : int = image - 1
        return image

    def lowerCAmelCase ( self : Union[str, Any] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : Optional[bool] = None , __a : Optional[Union[List[List[int]], np.ndarray]] = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST , **__a : Dict , ) -> PIL.Image.Image:
        """Run the configured pipeline over one image or a batch; returns a
        BatchFeature with key "input_ids" (cluster indices when colour
        quantization is on, otherwise processed pixel arrays)."""
        # Per-call overrides fall back to the defaults stored in __init__.
        __lowercase : str = do_resize if do_resize is not None else self.do_resize
        __lowercase : Tuple = size if size is not None else self.size
        __lowercase : Union[str, Any] = get_size_dict(__a )
        __lowercase : Union[str, Any] = resample if resample is not None else self.resample
        __lowercase : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        __lowercase : List[str] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        __lowercase : List[str] = clusters if clusters is not None else self.clusters
        __lowercase : Union[str, Any] = np.array(__a )
        __lowercase : List[Any] = make_list_of_images(__a )
        if not valid_images(__a ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_color_quantize and clusters is None:
            raise ValueError("""Clusters must be specified if do_color_quantize is True.""" )
        # All transformations expect numpy arrays.
        __lowercase : Union[str, Any] = [to_numpy_array(__a ) for image in images]
        if do_resize:
            __lowercase : str = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
        if do_normalize:
            __lowercase : Tuple = [self.normalize(image=__a ) for image in images]
        if do_color_quantize:
            __lowercase : str = [to_channel_dimension_format(__a , ChannelDimension.LAST ) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            __lowercase : Optional[Any] = np.array(__a )
            __lowercase : Tuple = color_quantize(__a , __a ).reshape(images.shape[:-1] )
            # flatten to (batch_size, height*width)
            __lowercase : Any = images.shape[0]
            __lowercase : Tuple = images.reshape(__a , -1 )
            # We need to convert back to a list of images to keep consistent behaviour across processors.
            __lowercase : Optional[int] = list(__a )
        else:
            __lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images]
        __lowercase : List[Any] = {"""input_ids""": images}
        return BatchFeature(data=__a , tensor_type=__a )
| 306
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
    """Convert an old-structure (XLM)ProphetNet checkpoint to the current
    transformers layout and save it.

    NOTE(review): both parameters share one name (a SyntaxError) — they are
    the checkpoint path and the output folder, judging by the __main__
    block.  The body also reads `prophetnet_checkpoint_path`, `prophet`,
    `prophet_old`, `loading_info`, `attributes`, `mapping`, `model`,
    `old_model`, `old_attribute`, `is_key_init` and
    `pytorch_dump_folder_path`, none of which are bound here as written —
    the assignments all target the throwaway `__lowercase`.  TODO restore
    the real names.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        __lowercase : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
        __lowercase , __lowercase : int = XLMProphetNetForConditionalGeneration.from_pretrained(
            lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
    else:
        __lowercase : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
        __lowercase , __lowercase : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
            lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
    # Attention projections whose fused in_proj weights must be split.
    __lowercase : List[str] = ["""key_proj""", """value_proj""", """query_proj"""]
    # Old attribute name -> new attribute name.
    __lowercase : Optional[int] = {
        """self_attn""": """ngram_self_attn""",
        """cross_attn""": """encoder_attn""",
        """cross_attn_layer_norm""": """encoder_attn_layer_norm""",
        """feed_forward_layer_norm""": """final_layer_norm""",
        """feed_forward""": """""",
        """intermediate""": """fc1""",
        """output""": """fc2""",
        """key_proj""": """k_proj""",
        """query_proj""": """q_proj""",
        """value_proj""": """v_proj""",
        """word_embeddings""": """embed_tokens""",
        """embeddings_layer_norm""": """emb_layer_norm""",
        """relative_pos_embeddings""": """relative_linear""",
        """ngram_embeddings""": """ngram_input_embed""",
        """position_embeddings""": """embed_positions""",
    }
    # Walk every missing key and copy the matching tensor from the old model.
    for key in loading_info["missing_keys"]:
        __lowercase : Tuple = key.split(""".""" )
        if attributes[0] == "lm_head":
            __lowercase : str = prophet
            __lowercase : List[str] = prophet_old
        else:
            __lowercase : Tuple = prophet.prophetnet
            __lowercase : Union[str, Any] = prophet_old.model
        __lowercase : Optional[Any] = False
        for attribute in attributes:
            if attribute in mapping:
                __lowercase : Optional[int] = mapping[attribute]
            if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0:
                __lowercase : str = attribute
            elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
                __lowercase : List[Any] = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                __lowercase : Any = old_model.weight
                logger.info(F"{attribute} is initialized." )
                __lowercase : Any = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                __lowercase : Dict = old_model.bias
                logger.info(F"{attribute} is initialized" )
                __lowercase : int = True
                break
            elif attribute in special_keys and hasattr(lowerCAmelCase_ , """in_proj_weight""" ):
                __lowercase : Dict = old_model.in_proj_weight.shape[0] // 3
                __lowercase : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                # NOTE(review): the next two comparisons are evaluated and
                # discarded — they are almost certainly missing an `assert`.
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                # Split the fused in_proj weight/bias into q/k/v thirds.
                if attribute == "query_proj":
                    __lowercase : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    __lowercase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    __lowercase : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    __lowercase : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    __lowercase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    __lowercase : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                __lowercase : int = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                __lowercase : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                __lowercase : int = True
                break
            # Descend one level in both module trees for the next attribute.
            if attribute.isdigit():
                __lowercase : Tuple = model[int(lowerCAmelCase_ )]
                __lowercase : int = old_model[int(lowerCAmelCase_ )]
            else:
                __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                if old_attribute == "":
                    __lowercase : int = old_model
                else:
                    if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
                        raise ValueError(F"{old_model} does not have {old_attribute}" )
                    __lowercase : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
        if not is_key_init:
            raise ValueError(F"{key} was not correctly initialized!" )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    prophet.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to `lowerCamelCase`, yet the lines
    # below read `parser`, `args` and `convert_prophetnet_checkpoint_to_pytorch`
    # — none of these names are defined in this module as written.
    lowerCamelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCamelCase : Any = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
| 1
|
def snake_case_ ( sorted_collection , item ):
    """Iterative interpolation search over an ascending `sorted_collection`.

    Returns the index of one matching element, or None when absent.
    Integer arithmetic only; the probe position is interpolated from the
    boundary values, giving O(log log n) average probes on roughly uniform
    keys.

    Fix: the original declared both parameters under the single name
    `lowerCAmelCase_` (a SyntaxError) and read `left`/`right`/`point`/
    `current_item`, which were never bound.
    """
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection ):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def snake_case_ ( sorted_collection , item , left , right ):
    """Recursive interpolation search over sorted_collection[left:right + 1].

    Returns a matching index or None.  Fixes: the original declared all four
    parameters under the single name `lowerCAmelCase_` (a SyntaxError) and
    recursed through `interpolation_search_by_recursion`, a name that is not
    bound in this module; the recursion now goes through this function's own
    name.
    """
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection ):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return snake_case_(sorted_collection , item , point , left )
    elif point > right:
        return snake_case_(sorted_collection , item , right , left )
    else:
        if sorted_collection[point] > item:
            return snake_case_(
                sorted_collection , item , left , point - 1 )
        else:
            return snake_case_(
                sorted_collection , item , point + 1 , right )
def snake_case_ ( lowerCAmelCase_ ):
    """Return True if the collection is ascending-sorted, else raise
    ValueError.

    Fix: the original compared `collection` — a name that was never bound —
    against the sorted parameter.
    """
    if lowerCAmelCase_ != sorted(lowerCAmelCase_ ):
        raise ValueError("""Collection must be ascending sorted""" )
    return True
if __name__ == "__main__":
    import sys
    # NOTE(review): the flag below is bound to `lowerCamelCase`, but the
    # code reads `debug`; likewise `collection`, `target`, `result`,
    # `__assert_sorted` and `interpolation_search` are never bound under
    # those names in this module — this block raises NameError as written.
    lowerCamelCase : Optional[int] = 0
    if debug == 1:
        lowerCamelCase : int = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
    lowerCamelCase : str = 67
    lowerCamelCase : Dict = interpolation_search(collection, target)
    if result is not None:
        print(f'''{target} found at positions: {result}''')
    else:
        print('''Not found''')
| 306
|
def snake_case_(pence: int = 200) -> int:
    """Project Euler 31: number of ways to make *pence* pence from UK coins.

    Classic bottom-up coin-change count over the denominations 1p..£2.

    Args:
        pence: target amount in pence (default 200 = £2).

    Returns:
        The number of distinct coin combinations summing to ``pence``.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        # Start at ``coin`` (the garbled original restarted at the target
        # amount, skipping every smaller sub-amount).
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined under that name in this file --
    # the coin-combinations function above was renamed to ``snake_case_``.
    assert solution(2_00) == 7_36_82
| 306
| 1
|
def snake_case_(number: int) -> int:
    """Return the ``number``-th term of Sylvester's sequence (2, 3, 7, 43, ...).

    The sequence satisfies a(1) = 2 and a(n) = a(n-1)**2 - a(n-1) + 1.

    Args:
        number: 1-based index of the requested term.

    Returns:
        The ``number``-th Sylvester number.

    Raises:
        AssertionError: if ``number`` is not an integer.
        ValueError: if ``number`` is less than 1.
    """
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"
    if number == 1:
        return 2
    elif number < 1:
        raise ValueError(f"The input value of [n={number}] has to be > 0")
    else:
        # Iterate instead of recursing through the module-level name, which
        # is re-bound by later definitions in this file.
        term = 2
        for _ in range(number - 1):
            # (term - 1) * term + 1 == term**2 - term + 1
            term = (term - 1) * term + 1
        return term
if __name__ == "__main__":
    # NOTE(review): ``sylvester`` is not defined under that name in this file --
    # the sequence function above was renamed to ``snake_case_``.
    print(f'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
| 306
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
    """Builds TimmBackbone configs and dummy inputs for the test class below.

    NOTE(review): reconstructed from mangled source -- the original bound these
    values to ``self`` attributes (they were assigned to a throwaway local) and
    used the distinct method names that this file itself calls
    (``get_config``, ``prepare_config_and_inputs``,
    ``prepare_config_and_inputs_for_common``). ``create_and_check_model`` is
    the presumed upstream name; confirm against the original test file.
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        # Default to the last stage when the caller does not pin the indices.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return a backbone config plus a random ``pixel_values`` batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a TimmBackboneConfig from this tester's settings."""
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass and check the final feature-map shape."""
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
        # NOTE(review): the original accesses ``result.feature_map`` even though
        # backbone outputs usually expose ``feature_maps`` -- kept as-is; confirm.
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` for the common model tests."""
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
    '''Test suite for the TimmBackbone model.

    NOTE(review): the base classes are written as the undefined name ``__a``
    three times (presumably ModelTesterMixin, BackboneTesterMixin and
    PipelineTesterMixin imported above), the repeated ``_A`` class attributes
    shadow one another, and many method bodies bind results to a throwaway
    ``__lowercase`` local and reference the undefined ``__a`` -- all artifacts
    of the source mangling. Code is kept byte-identical; confirm against the
    original test file before relying on any single method.
    '''
    _A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
    _A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    _A : List[Any] = False
    _A : List[str] = False
    _A : Any = False
    _A : Optional[Any] = False
    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Set up the model tester and config tester.

        NOTE(review): the two helpers are bound to a local instead of
        ``self.model_tester`` / ``self.config_tester`` as the other methods
        expect -- mangled; confirm upstream.
        """
        __lowercase : str = TimmBackboneModelTester(self )
        __lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
    def lowerCAmelCase ( self : Any ) -> str:
        """Exercise the standard config serialization round-trips."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def lowerCAmelCase ( self : str ) -> Tuple:
        """Compare a timm-loaded backbone against its transformers twin.

        NOTE(review): ``__a`` is undefined in this scope; the original
        presumably passed the two checkpoint names bound just above.
        """
        __lowercase : Tuple = """resnet18"""
        __lowercase : Optional[int] = """microsoft/resnet-18"""
        __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
        __lowercase : Dict = AutoBackbone.from_pretrained(__a )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
        __lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
    def lowerCAmelCase ( self : List[Any] ) -> Any:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
    def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
    def lowerCAmelCase ( self : List[Any] ) -> str:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : Union[str, Any] ) -> int:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def lowerCAmelCase ( self : Dict ) -> int:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : List[str] ) -> List[Any]:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : List[Any] ) -> Tuple:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
    def lowerCAmelCase ( self : Dict ) -> Any:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
    def lowerCAmelCase ( self : str ) -> List[Any]:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""Safetensors is not supported by timm.""" )
    def lowerCAmelCase ( self : Any ) -> List[Any]:
        """Skipped; see the decorator reason."""
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCAmelCase ( self : List[str] ) -> List[str]:
        """Skipped; see the decorator reason."""
        pass
    def lowerCAmelCase ( self : Any ) -> List[str]:
        """Check every model's forward signature starts with ``pixel_values``."""
        __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : Optional[Any] = model_class(__a )
            __lowercase : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase : List[str] = [*signature.parameters.keys()]
            __lowercase : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __a )
    def lowerCAmelCase ( self : Optional[Any] ) -> int:
        """Check gradients flow back into hidden states (and attentions)."""
        __lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase : Optional[Any] = True
        __lowercase : Union[str, Any] = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        __lowercase : Union[str, Any] = self.all_model_classes[0]
        __lowercase : List[Any] = model_class(__a )
        model.to(__a )
        __lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
        __lowercase : Union[str, Any] = model(**__a )
        __lowercase : Optional[int] = outputs[0][-1]
        # Encoder-/Decoder-only models
        __lowercase : Any = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            __lowercase : Optional[int] = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=__a )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """Check feature maps and channels track ``config.out_indices``."""
        __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : List[str] = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : int = model(**__a )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            __lowercase : Any = copy.deepcopy(__a )
            __lowercase : Dict = None
            __lowercase : Tuple = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : Optional[int] = model(**__a )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            __lowercase : List[str] = copy.deepcopy(__a )
            __lowercase : Optional[Any] = False
            __lowercase : str = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : List[Any] = model(**__a )
| 306
| 1
|
def snake_case_(a, b):
    """Multiply ``a`` by ``b`` using only shifts and additions (Russian
    peasant / binary multiplication).

    Args:
        a: first factor.
        b: second factor; assumed non-negative -- a negative ``b`` makes the
           loop exit immediately and returns 0.

    Returns:
        The product ``a * b``.
    """
    res = 0
    while b > 0:
        if b & 1:  # current low bit of b contributes the current multiple of a
            res += a
        a += a  # double a ...
        b >>= 1  # ... while halving b
    return res
def snake_case_(a, b, c):
    """Return ``(a * b) % c`` via binary multiplication, keeping every
    intermediate value reduced modulo ``c``.

    Args:
        a: first factor.
        b: second factor; assumed non-negative (a negative ``b`` yields 0).
        c: modulus.

    Returns:
        ``(a * b) % c``.
    """
    res = 0
    while b > 0:
        if b & 1:
            # Reduce both operands before adding so the sum stays < 2*c.
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
| 306
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# NOTE(review): these module-level names are all bound to ``lowerCamelCase`` by
# the source mangling; the functions below read them as ``logger``, ``MAPPING``
# and ``TOP_LEVEL_KEYS`` respectively -- confirm against the original script.
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
# fairseq parameter-name prefix -> transformers parameter-name mapping
# ('*' is a placeholder later replaced by the encoder layer index).
lowerCamelCase : str = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
    '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
    '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
    '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
    '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
    '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
    '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
    '''self_attn.rotary_emb''': '''encoder.embed_positions''',
    '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
    '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
    '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
    '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
    '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
    '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
    '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
    '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
    '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
    '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
    '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
    '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
# Keys that live at the top level of the model instead of under the
# ``wav2vec2_conformer.`` prefix.
lowerCamelCase : Optional[Any] = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def snake_case_(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the parameter of ``hf_pointer`` addressed by the
    dotted path ``key``, after checking that the shapes agree.

    Args:
        hf_pointer: root module to descend into.
        key: dotted attribute path (e.g. ``encoder.layers.0.ffn1...``).
        value: tensor to copy in.
        full_name: original fairseq parameter name, used for messages only.
        weight_type: which slot to write (``"weight"``, ``"bias"``,
            ``"running_mean"``, ...) or ``None`` for the pointer itself.

    Raises:
        ValueError: when the target shape differs from ``value.shape``.
    """
    # Walk the dotted path down to the target submodule/parameter.
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    # Write into the requested slot; the fallthrough handles bare tensors.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ):
    # NOTE(review): the three parameters share one name, which is a
    # SyntaxError; upstream this maps fairseq weights into the HF model
    # (fairseq_model, hf_model, is_headless). Most locals below are bound to
    # a throwaway ``__lowercase`` while later lines read other names
    # (``fairseq_model``, ``is_used``, ``mapped_key``...), and ``MAPPING`` /
    # ``TOP_LEVEL_KEYS`` / ``load_conv_layer`` / ``set_recursively`` are not
    # defined under those names in this file -- all mangled; confirm upstream.
    __lowercase : str = []
    __lowercase : Any = fairseq_model.state_dict()
    # conv feature extractor weights are handled separately below
    __lowercase : List[str] = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        __lowercase : Optional[Any] = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , )
            __lowercase : List[str] = True
        else:
            for key, mapped_key in MAPPING.items():
                # prefix non-top-level keys with the conformer submodule name
                __lowercase : Any = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    __lowercase : Tuple = True
                    if "*" in mapped_key:
                        # substitute the encoder layer index for the wildcard
                        __lowercase : List[Any] = name.split(lowerCAmelCase_ )[0].split(""".""" )[-2]
                        __lowercase : Any = mapped_key.replace("""*""" , lowerCAmelCase_ )
                    # classify which slot of the target parameter to write
                    if "pos_bias_u" in name:
                        __lowercase : Any = None
                    elif "pos_bias_v" in name:
                        __lowercase : Tuple = None
                    elif "weight_g" in name:
                        __lowercase : Union[str, Any] = """weight_g"""
                    elif "weight_v" in name:
                        __lowercase : Dict = """weight_v"""
                    elif "bias" in name:
                        __lowercase : Union[str, Any] = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __lowercase : str = """weight"""
                    elif "running_mean" in name:
                        __lowercase : str = """running_mean"""
                    elif "inv_freq" in name:
                        __lowercase : List[Any] = """inv_freq"""
                    elif "running_var" in name:
                        __lowercase : Any = """running_var"""
                    elif "num_batches_tracked" in name:
                        __lowercase : Any = """num_batches_tracked"""
                    else:
                        __lowercase : Optional[int] = None
                    set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                continue
        if not is_used:
            unused_weights.append(lowerCAmelCase_ )
    logger.warning(F"Unused weights: {unused_weights}" )
def snake_case_(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq conv-feature-extractor tensor into the HF extractor.

    fairseq names look like ``...conv_layers.<layer>.<type>.<param>`` where
    type 0 is the conv itself and type 2 the (layer/group) norm.

    Args:
        full_name: complete fairseq parameter name.
        value: tensor to copy in.
        feature_extractor: HF feature extractor holding ``conv_layers``.
        unused_weights: list collecting names that could not be placed.
        use_group_norm: True when the extractor uses group norm (only layer 0
            carries the norm in that mode).

    Raises:
        ValueError: when a shape mismatch is detected.
    """
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=True ):
    # NOTE(review): the five parameters share one name, which is a
    # SyntaxError; upstream this is the converter entry point
    # (checkpoint_path, pytorch_dump_folder_path, config_path, dict_path,
    # is_finetuned). Locals below are bound to ``__lowercase`` while later
    # lines read other names (``config``, ``target_dict``, ``model``...),
    # and ``recursively_load_weights`` is not defined under that name in
    # this file -- all mangled; confirm against the original script.
    if config_path is not None:
        __lowercase : List[Any] = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase_ , hidden_act="""swish""" )
    else:
        __lowercase : List[Any] = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # rope checkpoints use rotary position embeddings
        __lowercase : Tuple = """rotary"""
    if is_finetuned:
        if dict_path:
            __lowercase : Any = Dictionary.load(lowerCAmelCase_ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __lowercase : List[Any] = target_dict.pad_index
            __lowercase : Optional[int] = target_dict.bos_index
            __lowercase : List[Any] = target_dict.eos_index
            __lowercase : List[str] = len(target_dict.symbols )
            __lowercase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" )
            if not os.path.isdir(lowerCAmelCase_ ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) )
                return
            os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
            __lowercase : Tuple = target_dict.indices
            # fairseq has the <pad> and <s> switched
            __lowercase : int = 0
            __lowercase : Any = 1
            with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
            __lowercase : Dict = WavaVecaCTCTokenizer(
                lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , )
            __lowercase : List[Any] = True if config.feat_extract_norm == """layer""" else False
            __lowercase : Optional[Any] = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
            __lowercase : Optional[int] = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
            processor.save_pretrained(lowerCAmelCase_ )
        __lowercase : Union[str, Any] = WavaVecaConformerForCTC(lowerCAmelCase_ )
    else:
        __lowercase : Optional[Any] = WavaVecaConformerForPreTraining(lowerCAmelCase_ )
    if is_finetuned:
        __lowercase , __lowercase , __lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        __lowercase : List[Any] = argparse.Namespace(task="""audio_pretraining""" )
        __lowercase : Optional[Any] = fairseq.tasks.setup_task(lowerCAmelCase_ )
        __lowercase , __lowercase , __lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ )
    __lowercase : Dict = model[0].eval()
    recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned )
    hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    # NOTE(review): the parser is bound to ``lowerCamelCase`` but read as
    # ``parser``/``args`` below, and ``convert_wavaveca_conformer_checkpoint``
    # is not defined under that name in this file (the converter above is
    # named ``snake_case_``) -- garbled by the source transformation.
    lowerCamelCase : int = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    lowerCamelCase : Any = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 306
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the CLIPSeg model package.
# NOTE(review): this dict is bound to ``lowerCamelCase`` but referenced as
# ``_import_structure`` in the _LazyModule call below -- garbled by the
# source transformation.
lowerCamelCase : List[Any] = {
    '''configuration_clipseg''': [
        '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''CLIPSegConfig''',
        '''CLIPSegTextConfig''',
        '''CLIPSegVisionConfig''',
    ],
    '''processing_clipseg''': ['''CLIPSegProcessor'''],
}
# Only register the modeling module when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase : Optional[Any] = [
        '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''CLIPSegModel''',
        '''CLIPSegPreTrainedModel''',
        '''CLIPSegTextModel''',
        '''CLIPSegVisionModel''',
        '''CLIPSegForImageSegmentation''',
    ]
# Static type checkers see the real imports; at runtime the module is
# replaced by a _LazyModule that imports submodules on first access.
if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys
    # NOTE(review): upstream assigns to ``sys.modules[__name__]`` here.
    lowerCamelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
|
def snake_case_(string_a: str, string_b: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    The mangled original unpacked both zip elements into the same name, so
    the comparison was always false and the distance was always 0.

    Args:
        string_a: first string.
        string_b: second string, same length as ``string_a``.

    Returns:
        The number of positions at which the two strings differ.

    Raises:
        ValueError: if the strings have different lengths.
    """
    if len(string_a) != len(string_b):
        raise ValueError("""String lengths must match!""" )
    # Count positions where the characters disagree.
    return sum(char_a != char_b for char_a, char_b in zip(string_a, string_b))
if __name__ == "__main__":
    import doctest
    # Run any doctests in this module (the function above currently has none).
    doctest.testmod()
| 306
| 1
|
# Adjacency list and vertex list for the demo graph below.
# NOTE(review): both constants are bound to the same name ``lowerCamelCase``
# (the second assignment shadows the first); the sort function reads them as
# ``edges`` and ``vertices`` -- garbled by the source transformation.
lowerCamelCase : Any = {'''a''': ['''c''', '''b'''], '''b''': ['''d''', '''e'''], '''c''': [], '''d''': [], '''e''': []}
lowerCamelCase : int = ['''a''', '''b''', '''c''', '''d''', '''e''']
def snake_case_(start, visited, sort):
    """Depth-first topological sort over the module-level graph.

    NOTE(review): reads the module-level names ``edges`` (adjacency list) and
    ``vertices`` (vertex list); in this file those constants were mangled to
    ``lowerCamelCase`` -- confirm against the original module.

    Args:
        start: vertex to start the DFS from.
        visited: list accumulating visited vertices (mutated in place).
        sort: list accumulating the post-order (mutated/extended).

    Returns:
        The list of vertices in topological-sort order.
    """

    def _sort_from(current, visited, sort):
        # add current to visited
        visited.append(current)
        for neighbor in edges[current]:
            # if neighbor not in visited, visit
            if neighbor not in visited:
                sort = _sort_from(neighbor, visited, sort)
        # if all neighbors visited add current to sort
        sort.append(current)
        # if all vertices haven't been visited select a new one to visit
        if len(visited) != len(vertices):
            for vertice in vertices:
                if vertice not in visited:
                    sort = _sort_from(vertice, visited, sort)
        return sort

    # Inner helper keeps the recursion independent of the (re-bound)
    # module-level name of this function.
    return _sort_from(start, visited, sort)
if __name__ == "__main__":
    # NOTE(review): ``topological_sort`` is not defined under that name in this
    # file (the function above is ``snake_case_``), and ``sort`` on the next
    # line is unbound -- garbled by the source transformation.
    lowerCamelCase : int = topological_sort('''a''', [], [])
    print(sort)
| 306
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case_(x):
    """Return ``x`` unchanged when it is already iterable, else the pair
    ``(x, x)`` (the usual ``to_2tuple`` helper for image sizes).

    Args:
        x: any value; iterables pass through untouched.

    Returns:
        ``x`` itself if iterable, otherwise ``(x, x)``.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class lowerCAmelCase :
'''simple docstring'''
def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = np.abs((a - b) ).max()
self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = after_output[0]
__lowercase : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1E-3 )
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
__lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Union[str, Any] = model(
input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
__lowercase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
__lowercase : List[str] = to_atuple(vision_model.config.patch_size )
__lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase : Dict = output.text_model_output.attentions
self.assertEqual(len(__a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pt_model.to(__a )
pt_model.eval()
# prepare inputs
__lowercase : Union[str, Any] = inputs_dict
__lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
__lowercase : Tuple = fx_model(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__a )
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
__lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__a )
__lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
pt_model_loaded.to(__a )
pt_model_loaded.eval()
with torch.no_grad():
__lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = VisionTextDualEncoderModel(__a )
__lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
__lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
__lowercase : Any = fx_state
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
"""simple docstring"""
__lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
__lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__a )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**__a )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__a )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
__lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
__lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
__lowercase : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(__a , __a , __a )
self.check_equivalence_flax_to_pt(__a , __a , __a )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
__lowercase : Dict = model_a(**__a )
__lowercase : Any = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__a )
__lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Optional[int] = model_a(**__a )
__lowercase : Tuple = after_outputs[0]
__lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
    """ViT + BERT instantiation of the dual-encoder test mixin.

    NOTE(review): the base `__a` is presumably the shared VisionTextDualEncoder
    test mixin defined earlier in this file — confirm; the name is unresolved here.
    """

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
    """CLIP-vision + BERT instantiation of the dual-encoder test mixin.

    NOTE(review): the base `__a` is presumably the shared VisionTextDualEncoder
    test mixin defined earlier in this file — confirm; the name is unresolved here.
    """

    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
    """Integration test against the public clip-italian checkpoint."""

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 306
| 1
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Map of released MaskFormer checkpoints to their hosted config files.
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

# Module logger; the config class below calls logger.warning_once on unsupported backbones.
logger = logging.get_logger(__name__)
class lowerCAmelCase(PretrainedConfig):
    """Configuration class for MaskFormer (backbone + DETR-style transformer decoder).

    The original had all four class attributes bound to the same name `_A`
    (so `self.backbones_supported` / `self.decoders_supported`, read below, did
    not exist) and duplicated `__a` parameter names; real names restored.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)

        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimensions for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        # mirror the decoder's transformer dimensions on the top-level config
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_and_decoder_configs(
        cls, backbone_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ):
        """Alternate constructor from already-instantiated backbone/decoder configs."""
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 306
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 306
| 1
|
from __future__ import annotations
from math import pi
def snake_case_(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Solve X_L = 2*pi*f*L for whichever of the three quantities is given as 0.

    Exactly one argument must be 0; that quantity is computed from the other
    two and returned as a single-entry dict keyed by its name.

    Raises:
        ValueError: if zero or more than one argument is 0, or any is negative.

    (Original signature repeated the name `lowerCAmelCase_` for all three
    parameters — a SyntaxError; names restored from the body.)
    """
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Tokenizer split pattern: anything that is not alphanumeric or underscore.
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Build a MinHash sketch over the unique tokens; None for short token lists.

    Fixes the original, which passed the token list itself as `num_perm`
    instead of the NUM_PERM constant, and restores the name used by the
    caller (_compute_min_hash).
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def get_tokens(code: str) -> Set[str]:
    """Return the set of non-empty alphanumeric tokens in `code`.

    Name restored from the call site in jaccard_similarity.
    """
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    """Incremental MinHash-LSH index that groups near-duplicate code files into clusters.

    Class and method names restored from the call sites in
    make_duplicate_clusters (`DuplicationIndex(...)`, `.add`, `.get_duplicate_clusters`).
    """

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        # base element -> set of elements that are near-duplicates of it
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert `code_key` and attach it to an existing cluster when it matches one."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # no matched neighbour is a cluster base yet: start a new cluster
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Return clusters as lists of {base_index, repo_name, path} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the clusters to `filepath` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    """Worker: (index, row) -> ((index, repo_name, path), MinHash) or None for short files.

    Name restored from the pool.imap_unordered call in minhash_iter.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield (key, MinHash) pairs computed in parallel over the dataset rows."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Index every file's MinHash and return the resulting near-duplicate clusters."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1: str, code2: str) -> float:
    """Token-set Jaccard similarity of two code strings.

    Original def repeated the parameter name (SyntaxError); restored from
    the call site in _find_cluster_extremes_shared.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
# Module-level dataset handle shared with worker processes; set via `global` in find_extremes().
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its 'extreme' representatives.

    Each element is either merged into an existing extreme (incrementing that
    extreme's `copies` count) when its Jaccard similarity reaches the
    threshold, or becomes a new extreme with copies=1. Reads the dataset via
    the module-global `_shared_dataset` set by find_extremes().
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every cluster in parallel.

    Publishes `dataset` through the module global so forked workers can read
    it without pickling it per task.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    """Remove near-duplicate files from `dataset`, keeping one 'extreme' per cluster.

    Returns:
        (filtered_dataset, duplicate_clusters) where each cluster element is
        annotated with `is_extreme` and, for extremes, its `copies` count.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element

    # drop every duplicate that is not a kept extreme
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters with bookkeeping about what survived
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
| 306
| 1
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds tiny MPNet configs/inputs and runs per-task shape checks.

    Class name restored from the call site (`MPNetModelTester(self)` in the
    common test class); the original __init__ repeated `__a` for every
    parameter (a SyntaxError) and dropped the `self.` prefixes.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # replicate inputs along a new choices axis
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model + pipeline tests for MPNet.

    Bases restored from the imported mixins; the original bound all four class
    attributes to the same name `_A` and never assigned `self.model_tester`.
    """

    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class lowerCAmelCase(unittest.TestCase):
    """Integration test against the public microsoft/mpnet-base checkpoint."""

    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 306
|
from ...processing_utils import ProcessorMixin
class lowerCAmelCase(ProcessorMixin):
    """Wraps a TVLT image processor and a TVLT feature extractor into one processor.

    The original __call__ repeated the parameter name `__a` six times (a
    SyntaxError); parameter names restored from the keyword arguments the body
    forwards (mask_pixel, is_mixed, sampling_rate, mask_audio).
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Preprocess any combination of images, mixed images, and audio into one dict."""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        """Union of both sub-processors' input names, order-preserving and de-duplicated."""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 306
| 1
|
from __future__ import annotations
import math
def snake_case_ ( lowerCAmelCase_ : int ):
    """Sieve of Eratosthenes: return the list of primes in [2, lowerCAmelCase_].

    :param lowerCAmelCase_: inclusive upper bound; must be a positive integer.
    :raises ValueError: if the bound is not positive.
    """
    if lowerCAmelCase_ <= 0:
        raise ValueError(f"{lowerCAmelCase_}: Invalid input, please enter a positive integer.")

    sieve = [True] * (lowerCAmelCase_ + 1)
    prime = []
    end = int(math.sqrt(lowerCAmelCase_))

    # Sieve up to sqrt(n); rely on truthiness instead of the `is True` anti-idiom.
    for start in range(2, end + 1):
        if sieve[start]:
            prime.append(start)
            # start*start is the first multiple not already crossed off by a
            # smaller prime factor.
            for multiple in range(start * start, lowerCAmelCase_ + 1, start):
                sieve[multiple] = False

    # Every survivor above sqrt(n) is prime (any composite has a factor <= sqrt(n)).
    for candidate in range(end + 1, lowerCAmelCase_ + 1):
        if sieve[candidate]:
            prime.append(candidate)
    return prime
if __name__ == "__main__":
    # Fix: the sieve above is defined as `snake_case_`; the original called the
    # undefined name `prime_sieve`, which raised NameError on launch.
    print(snake_case_(int(input('''Enter a positive integer: ''').strip())))
| 306
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    """Factory for a tiny `EsmConfig` plus random tensors for the ESM unit tests.

    Restored from a mangled original: every `__init__` parameter had been
    renamed to the same `__a` (a SyntaxError) and each local to `__lowercase`.
    The class and method names are the ones the test class below actually
    calls (`EsmModelTester(self)`, `prepare_config_and_inputs`, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """A deliberately tiny config so the common tests stay fast."""
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        # The original exercised all three call signatures; only the last
        # result is shape-checked.
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Shape used by ModelTesterMixin: (config, inputs_dict)."""
        config, input_ids, input_mask, sequence_labels, token_labels, choice_labels = (
            self.prepare_config_and_inputs()
        )
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite for the ESM model family.

    Restored from a mangled original: the base classes were the undefined
    placeholder `__a`, the class attributes all shadowed one another under the
    name `_A`, and every method shared the name `lowerCAmelCase`. Attribute and
    method names below follow the ModelTesterMixin contract; they were restored
    from the upstream transformers test suite where not visible locally.
    """

    # Flags and model registries consumed by ModelTesterMixin / PipelineTesterMixin.
    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Padding tokens map to padding_idx; real tokens are offset past it."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Position ids derived from embeddings are sequential past padding_idx."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):  # name restored from upstream — TODO confirm
        pass
@require_torch
class lowerCAmelCase(TestCasePlus):
    """Slow integration tests that run a real pretrained ESM checkpoint.

    The base class was the undefined placeholder `__a`; `TestCasePlus` is the
    only test base imported by this module.
    """

    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            # Reference logits recorded from the released checkpoint.
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # Compare the actual values for a slice of the hidden states.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 306
| 1
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (transformers logging wrapper).
# NOTE(review): both statements below bind the same mangled name
# `lowerCamelCase`, so the logger binding is immediately shadowed by the
# checkpoint map — the original names were presumably distinct; confirm.
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
# Map of canonical EnCodec checkpoints to their hosted config files.
lowerCamelCase : Tuple = {
    '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
    '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class lowerCAmelCase(PretrainedConfig):
    r"""
    Configuration for an EnCodec neural audio codec model.

    Restored from a mangled original: the duplicated `__a` parameter names in
    `__init__` were a SyntaxError, and all four properties shared the name
    `lowerCAmelCase` so each shadowed the previous one. The property names
    below are the ones the method bodies themselves reference
    (`self.chunk_length`, `self.frame_rate`).
    """

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],  # noqa: B006 — mutable default kept for API compatibility
        sampling_rate=24000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],  # noqa: B006
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Codebook dimension falls back to the model hidden size.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}"
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self):
        """Chunk length in samples, or None when processing is un-chunked."""
        if self.chunk_length_s is None:
            return None
        return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        """Stride between chunks in samples (>= 1), or None when un-chunked."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        """Encoder frames per second: sampling_rate / prod(upsampling_ratios)."""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        """Number of RVQ codebooks needed for the largest target bandwidth."""
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 306
|
def is_pentagonal(n: int) -> bool:
    """Return True iff `n` is a pentagonal number.

    n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a natural number.
    (Name restored: both functions here were mangled to `snake_case_`, so the
    second shadowed the first and this call site was a NameError.)
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: among the first `limit` pentagonal numbers, find a
    pair P_i <= P_j whose sum and difference are both pentagonal, and return
    the difference D = P_j - P_i of the first such pair found; -1 if none.

    Fixes the mangled inner loop, which started at `limit` instead of `i` and
    therefore never iterated (the function always returned -1).
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            pair_sum = pentagonal_i + pentagonal_j
            pair_diff = pentagonal_j - pentagonal_i
            if is_pentagonal(pair_sum) and is_pentagonal(pair_diff):
                return pair_diff
    return -1


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 306
| 1
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
# Local cache root for artifacts downloaded from the Hugging Face Hub.
# NOTE(review): every constant below is bound to the same mangled name
# `lowerCamelCase`, so each assignment shadows the previous one and only the
# final binding survives; `default_cache_path` (referenced further down) is
# never defined under that name here — presumably these originally had
# distinct names such as CONFIG_NAME / WEIGHTS_NAME; confirm against upstream.
lowerCamelCase : str = HUGGINGFACE_HUB_CACHE
# Canonical file names used when saving/loading diffusion models.
lowerCamelCase : Tuple = '''config.json'''
lowerCamelCase : List[str] = '''diffusion_pytorch_model.bin'''
lowerCamelCase : Optional[int] = '''diffusion_flax_model.msgpack'''
lowerCamelCase : int = '''model.onnx'''
lowerCamelCase : Any = '''diffusion_pytorch_model.safetensors'''
lowerCamelCase : List[str] = '''weights.pb'''
# Remote endpoint and module-cache locations.
lowerCamelCase : Optional[int] = '''https://huggingface.co'''
lowerCamelCase : List[Any] = default_cache_path
lowerCamelCase : Optional[Any] = '''diffusers_modules'''
lowerCamelCase : Tuple = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
# Checkpoint variant suffixes that may be ignored when resolving weights.
lowerCamelCase : Tuple = ['''fp16''', '''non-ema''']
lowerCamelCase : List[Any] = '''.self_attn'''
| 306
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase(SchedulerCommonTest):
    """Tests for DPMSolverSDEScheduler.

    Restored from a mangled original (locals bound as `__lowercase`, `_A`
    attributes shadowing each other); the four near-identical full-loop tests
    now share the `_run_full_loop` helper.
    """

    # Names consumed by SchedulerCommonTest (restored from the visible
    # references `self.scheduler_classes[0]` / `self.num_inference_steps`).
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler kwargs, with any overrides applied on top."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,  # fixed seed keeps the SDE noise deterministic
        }
        config.update(**kwargs)
        return config

    def _run_full_loop(self, scheduler, device_timesteps=False):
        """Drive `scheduler` through a full denoising loop over a deterministic
        sample and return (sum(|x|), mean(|x|)) of the final sample.

        `device_timesteps=True` mirrors the variant that passes the device to
        `set_timesteps` and moves the sample up front.
        """
        if device_timesteps:
            scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
            model = self.dummy_model()
            sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        else:
            scheduler.set_timesteps(self.num_inference_steps)
            model = self.dummy_model()
            sample = (self.dummy_sample_deter * scheduler.init_noise_sigma).to(torch_device)

        for t in scheduler.timesteps:
            scaled_sample = scheduler.scale_model_input(sample, t)
            model_output = model(scaled_sample, t)
            sample = scheduler.step(model_output, t, sample).prev_sample

        return torch.sum(torch.abs(sample)), torch.mean(torch.abs(sample))

    def test_timesteps(self):
        for num_train_timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=num_train_timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config())
        result_sum, result_mean = self._run_full_loop(scheduler)

        # Reference values differ per backend.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config(prediction_type="v_prediction"))
        result_sum, result_mean = self._run_full_loop(scheduler)

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config())
        result_sum, result_mean = self._run_full_loop(scheduler, device_timesteps=True)

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler = self.scheduler_classes[0](**self.get_scheduler_config(), use_karras_sigmas=True)
        result_sum, result_mean = self._run_full_loop(scheduler, device_timesteps=True)

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 306
| 1
|
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Restored names: the original bound both constants to the same mangled name
# `lowerCamelCase`, so the SentencePiece underline marker was shadowed by the
# fixture path even though the tests below reference SPIECE_UNDERLINE.
SPIECE_UNDERLINE = '''▁'''
# Path to the small SentencePiece fixture model used by the tokenizer tests.
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for BertGenerationTokenizer (SentencePiece-backed).

    Restored from a mangled original: `__a` placeholders are replaced with the
    values implied by the surrounding assertions, the colliding `_A` class
    attributes and `lowerCAmelCase` method names are given the names the
    TokenizerTesterMixin contract expects, and the mixin base replaces the
    undefined placeholder `__a`.
    """

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # Build a tokenizer from the fixture model and save it so the mixin
        # can reload it from self.tmpdirname.
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """`<s>` maps to id 1 and back."""
        token = """<s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], """<unk>""")
        self.assertEqual(vocab_keys[1], """<s>""")
        self.assertEqual(vocab_keys[-1], """<pad>""")
        self.assertEqual(len(vocab_keys), 1002)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("""This is a test""")
        self.assertListEqual(tokens, ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]
        )

        # Accented / numeric input: unknown pieces stay as distinct tokens here.
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """9""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """é""",
                """.""",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]
        )

        # Decoding maps out-of-vocab ids (0) back to the <unk> piece.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + """I""",
                SPIECE_UNDERLINE + """was""",
                SPIECE_UNDERLINE + """b""",
                """or""",
                """n""",
                SPIECE_UNDERLINE + """in""",
                SPIECE_UNDERLINE + """""",
                """<unk>""",
                """2""",
                """0""",
                """0""",
                """0""",
                """,""",
                SPIECE_UNDERLINE + """and""",
                SPIECE_UNDERLINE + """this""",
                SPIECE_UNDERLINE + """is""",
                SPIECE_UNDERLINE + """f""",
                """al""",
                """s""",
                """<unk>""",
                """.""",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        """Full-size pretrained tokenizer used by the slow tests."""
        return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = """Hello World!"""
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
            """ add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
        )
        original_tokenizer_encodings = [
            871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985,
            456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159,
            633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391,
            408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129,
            1172,
        ]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build a short sequence from the first ten vocab entries.
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = """ """.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(
            sequence, return_tensors="""pt""", return_token_type_ids=False
        )
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + """ """ + sequence], return_tensors="""pt""", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        # The randomly initialised model must at least cover the vocab.
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"""input_ids""": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""",
            revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""",
        )
| 306
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
# ---------------------------------------------------------------------------
# Script setup: TensorRT logger, CLI arguments, tokenizer, and engine build.
# NOTE(review): identifiers in this file look machine-mangled — every module
# constant is bound to `lowerCamelCase`, yet later lines read names such as
# `absl_logger`, `parser`, `args`, `TRT_LOGGER` — verify against the original
# script before running.
# ---------------------------------------------------------------------------
lowerCamelCase : str = trt.Logger(trt.Logger.WARNING)
lowerCamelCase : Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
# Command line interface for the TensorRT SQuAD evaluation script.
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '''--onnx_model_path''',
    default=None,
    type=str,
    required=True,
    help='''Path to ONNX model: ''',
)
parser.add_argument(
    '''--output_dir''',
    default=None,
    type=str,
    required=True,
    help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
    '''--tokenizer_name''',
    default='''''',
    type=str,
    required=True,
    help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
    '''--version_2_with_negative''',
    action='''store_true''',
    help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
    '''--null_score_diff_threshold''',
    type=float,
    default=0.0,
    help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
    '''--max_seq_length''',
    default=3_84,
    type=int,
    help=(
        '''The maximum total input sequence length after WordPiece tokenization. Sequences '''
        '''longer than this will be truncated, and sequences shorter than this will be padded.'''
    ),
)
parser.add_argument(
    '''--doc_stride''',
    default=1_28,
    type=int,
    help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
    '''--n_best_size''',
    default=20,
    type=int,
    help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
    '''--max_answer_length''',
    default=30,
    type=int,
    help=(
        '''The maximum length of an answer that can be generated. This is needed because the start '''
        '''and end predictions are not conditioned on one another.'''
    ),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
    '''--dataset_name''',
    type=str,
    default=None,
    required=True,
    help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--dataset_config_name''',
    type=str,
    default=None,
    help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
    '''--fp16''',
    action='''store_true''',
    help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
    '''--int8''',
    action='''store_true''',
    help='''Whether to use INT8''',
)
lowerCamelCase : Dict = parser.parse_args()
if args.tokenizer_name:
    lowerCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
        '''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
    )
logger.info('''Training/evaluation parameters %s''', args)
lowerCamelCase : List[str] = args.per_device_eval_batch_size
# Fixed (batch, sequence) shape used for every TensorRT binding below.
lowerCamelCase : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase : List[str] = True
lowerCamelCase : List[Any] = '''temp_engine/bert-fp32.engine'''
# NOTE(review): `args.fpaa` / `args.inta` look like mangled `args.fp16` / `args.int8`
# (the flags declared above) — confirm before relying on these branches.
if args.fpaa:
    lowerCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine'''
if args.inta:
    lowerCamelCase : int = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
    os.makedirs('''temp_engine''')
lowerCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
# Parse the ONNX model and build a serialized TensorRT engine from it.
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, '''rb''') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    lowerCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
    lowerCamelCase : Dict = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        # Workspace size for the builder (1 << 50 bytes requested here).
        lowerCamelCase : List[str] = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        # NOTE(review): `FPaa` / `INTa` look like mangled `FP16` / `INT8` builder flags.
        if args.fpaa:
            config.set_flag(trt.BuilderFlag.FPaa)
        if args.inta:
            config.set_flag(trt.BuilderFlag.INTa)
        lowerCamelCase : Optional[int] = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        # Same min/opt/max shape: the engine is built for one fixed input shape.
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        lowerCamelCase : Optional[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
    f.write(engine.serialize())
def snake_case_ ( inputs , context , d_inputs , h_output0 , h_output1 , d_output0 , d_output1 , stream ):
    """Run one TensorRT inference step and time it.

    Args:
        inputs: batch dict with "input_ids", "attention_mask", "token_type_ids".
        context: TensorRT IExecutionContext.
        d_inputs: list of three device buffers for the model inputs.
        h_output0 / h_output1: page-locked host buffers for start/end logits.
        d_output0 / d_output1: device buffers for start/end logits.
        stream: pycuda CUDA stream used for all async copies and execution.

    Returns:
        ((h_output0, h_output1), infer_time) — host output buffers and the
        wall-clock seconds spent from launch to stream synchronization.

    NOTE(review): the original (mangled) source reused one parameter name for
    all eight arguments — a SyntaxError — and passed the same name to every
    memcpy/binding; the wiring below follows the positional call site.
    """
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)  # was np.intaa (mangled int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs host -> device asynchronously on the given stream.
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference: bindings are the device pointers of inputs then outputs.
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    lowerCamelCase : List[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
# SQuAD-style column discovery: fall back to positional columns if the
# conventional names are absent.
lowerCamelCase : Optional[Any] = raw_datasets['''validation'''].column_names
lowerCamelCase : Union[str, Any] = '''question''' if '''question''' in column_names else column_names[0]
lowerCamelCase : str = '''context''' if '''context''' in column_names else column_names[1]
lowerCamelCase : Dict = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase : Dict = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )
# Effective maximum sequence length, clamped to the tokenizer's limit.
lowerCamelCase : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def snake_case_ ( examples ):
    """Tokenize SQuAD-style validation examples into model features.

    Long contexts overflow into several features (with a stride), so the
    returned batch also records, per feature, the originating example id and
    an offset mapping restricted to context tokens — both needed later to map
    predicted spans back to text.

    Relies on module-level ``tokenizer``, ``args``, ``pad_on_right``,
    ``max_seq_length`` and the ``*_column_name`` globals defined above.

    NOTE(review): the mangled original discarded the lstrip result and passed
    the examples dict as ``max_length``; restored to the intended wiring.
    """
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
lowerCamelCase : Tuple = raw_datasets['''validation''']
# Validation Feature Creation
# NOTE(review): `prepare_validation_features` is the intended name of the
# preprocessing function defined above (mangled to `snake_case_`) — verify.
lowerCamelCase : Optional[int] = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)
lowerCamelCase : Union[str, Any] = default_data_collator
# Drop bookkeeping columns the model does not accept; they are kept on
# `eval_dataset` itself for post-processing.
lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowerCamelCase : List[str] = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def snake_case_ ( examples , features , predictions , stage="eval" ):
    """Convert raw start/end logits into SQuAD-format predictions and references.

    Args:
        examples: the original validation examples (with ids and answers).
        features: the tokenized features produced by the preprocessing step.
        predictions: tuple of (start_logits, end_logits) arrays.
        stage: prefix used for the files written by postprocess_qa_predictions.

    Returns:
        EvalPrediction with formatted predictions and gold references, ready
        for the squad / squad_v2 metric.

    NOTE(review): the mangled original reused one parameter name for the
    first three arguments (a SyntaxError); restored from the positional
    call site and keyword usage.
    """
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
lowerCamelCase : Dict = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
# Deserialize the engine built above and run the whole evaluation inside one
# execution context.
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def snake_case_ ( lowerCAmelCase_ : str ):
        # Size in bytes of one engine binding: element count (volume) * element size.
        return trt.volume(engine.get_binding_shape(lowerCAmelCase_ ) ) * engine.get_binding_dtype(lowerCAmelCase_ ).itemsize

    # Allocate device memory for inputs and outputs.
    lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    # NOTE(review): `np.floataa` looks like mangled `np.float32`; likewise the
    # two host buffers below were presumably distinct `h_output0`/`h_output1`.
    lowerCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    lowerCamelCase : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    lowerCamelCase : Dict = cuda.mem_alloc(h_outputa.nbytes)
    lowerCamelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    lowerCamelCase : Optional[int] = cuda.Stream()
    # Evaluation
    logger.info('''***** Running Evaluation *****''')
    logger.info(f''' Num examples = {len(eval_dataset)}''')
    logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    lowerCamelCase : int = 0.0
    lowerCamelCase : List[str] = 0
    lowerCamelCase : List[str] = timeit.default_timer()
    lowerCamelCase : List[Any] = None
    for step, batch in enumerate(eval_dataloader):
        # One timed TensorRT inference per batch.
        lowerCamelCase ,lowerCamelCase : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1
        lowerCamelCase ,lowerCamelCase : Union[str, Any] = outputs
        lowerCamelCase : Optional[Any] = torch.tensor(start_logits)
        lowerCamelCase : List[str] = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        lowerCamelCase : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        lowerCamelCase : Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
    if all_preds is not None:
        # Trim the gathered predictions back to the true dataset length.
        lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset))
    lowerCamelCase : Dict = timeit.default_timer() - start_time
    logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
    logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
    logger.info('''Total Number of Inference = %d''', niter)
    # Convert logits to text answers and score them with the squad metric.
    lowerCamelCase : str = post_processing_function(eval_examples, eval_dataset, all_preds)
    lowerCamelCase : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'''Evaluation metrics: {eval_metric}''')
| 306
| 1
|
from collections.abc import Callable
import numpy as np
def snake_case_ ( ode_func: Callable , y0: float , x0: float , step_size: float , x_end: float ):
    """Solve y' = ode_func(x, y) with Heun's method (improved Euler).

    Each step takes an explicit Euler predictor and averages the slope at the
    start and predicted end of the step (explicit trapezoidal rule).

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: initial abscissa.
        step_size: step length h (> 0).
        x_end: final abscissa; the interval is covered in ceil((x_end-x0)/h) steps.

    Returns:
        numpy array of the n+1 successive y approximations (y[0] == y0).

    NOTE(review): the mangled original reused one parameter name five times
    (a SyntaxError) and dropped the array indices; restored here.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Euler predictor for y(x + h).
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Trapezoidal corrector: average slope at both ends of the step.
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        x += step_size
    return y
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 306
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : str = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
    """Configuration class for an NLLB-MoE encoder-decoder model.

    Stores the transformer hyper-parameters plus the mixture-of-experts
    routing options. Defaults match the values in the mangled original
    (corresponding to the published nllb-moe checkpoints).
    """

    # NOTE(review): the mangled original bound the name `_A` three times in a
    # row; restored to the conventional PretrainedConfig class attributes that
    # match these values — confirm against upstream `configuration_nllb_moe`.
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        """Build the config; parameter names restored from the attribute
        assignments in the mangled original (whose `__init__` reused the name
        `__a` for every parameter — a SyntaxError).

        Raises:
            ValueError: if `router_dtype` is not one of the supported dtypes.
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts routing hyper-parameters.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 306
| 1
|
def snake_case_ ( a: str , b: str ) -> bool:
    """Return True if `a` can be turned into `b` by upper-casing some of its
    lowercase letters and deleting all remaining lowercase letters.

    Classic "abbreviation" dynamic programme: dp[i][j] is True when the first
    i characters of `a` can produce the first j characters of `b`.

    NOTE(review): the mangled original reused one parameter name for both
    arguments (a SyntaxError) and dropped the dp indices; restored here.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # empty prefix of a produces empty prefix of b
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Match: upper-casing a[i] yields b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete: lowercase characters may be dropped.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 306
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import table for the poolformer subpackage: names are only imported
# when first accessed (or eagerly under TYPE_CHECKING for static analysis).
# NOTE(review): the mangled original binds this dict to `lowerCamelCase` but
# `_LazyModule` below reads `_import_structure` — verify the intended name.
lowerCamelCase : Optional[Any] = {
    '''configuration_poolformer''': [
        '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''PoolFormerConfig''',
        '''PoolFormerOnnxConfig''',
    ]
}
# Vision-only symbols are registered only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase : int = ['''PoolFormerFeatureExtractor''']
    lowerCamelCase : Union[str, Any] = ['''PoolFormerImageProcessor''']
# Torch-only modeling symbols.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowerCamelCase : List[str] = [
        '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PoolFormerForImageClassification''',
        '''PoolFormerModel''',
        '''PoolFormerPreTrainedModel''',
    ]
if TYPE_CHECKING:
    # Eager imports so type checkers see the real symbols.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 306
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def snake_case_ ( state ):
    """Return this process's slice of a global 1..N**2 sequence.

    For process p of P processes, produces the P values
    [P*p + 1, ..., P*p + P] as a float tensor on `state.device`.

    NOTE(review): the mangled original named the parameter `lowerCAmelCase_`
    while the body read `state` (a NameError); the parameter is renamed to
    match the body.
    """
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)
def snake_case_ ( state ):
    """Gather each process's tensor and check the concatenation is 1..N**2.

    NOTE(review): parameter renamed from the mangled `lowerCAmelCase_` — the
    body already read `state.num_processes` (a NameError as written).
    """
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))
def snake_case_ ( state ):
    """Gather a Python object (this rank's index) from every process.

    NOTE(review): parameter renamed from the mangled `lowerCAmelCase_`; the
    f-string originally interpolated the parameter where the gathered list
    length was intended.
    """
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, F"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), F"{gathered_obj} != {list(range(state.num_processes))}"
def snake_case_ ( state ):
    """Broadcast the main process's tensor and check all ranks see 1..N.

    NOTE(review): parameter renamed from the mangled `lowerCAmelCase_` — the
    body already read `state.num_processes`.
    """
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))
def snake_case_ ( state ):
    """Check pad_across_processes pads shorter ranks' tensors with zeros.

    The main process holds N+1 elements, the others N, so non-main ranks
    must come back padded by one trailing zero.

    NOTE(review): parameter renamed from the mangled `lowerCAmelCase_` — the
    body already read `state.is_main_process`.
    """
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]
def snake_case_ ( state ):
    """Check sum-reduction across exactly two processes.

    Ranks hold [1, 2] and [3, 4], so the elementwise sum is [4, 6].

    NOTE(review): parameter renamed from the mangled `lowerCAmelCase_` — the
    body already read `state.num_processes`.
    """
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F"{reduced_tensor} != {truth_tensor}"
def snake_case_ ( state ):
    """Check mean-reduction across exactly two processes.

    Ranks hold [1, 2] and [3, 4], so the elementwise mean is [2, 3].

    NOTE(review): parameter renamed from the mangled `lowerCAmelCase_` — the
    body already read `state.num_processes`.
    """
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), F"{reduced_tensor} != {truth_tensor}"
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] ):
    """Entry point for xla_spawn (TPUs); the index argument is unused."""
    # For xla_spawn (TPUs)
    main()
def snake_case_ ( ):
    """Run every distributed-operations check on the current PartialState.

    NOTE(review): the mangled original passed an undefined name
    (`lowerCAmelCase_`) to each check; the local `state` is the only value in
    scope that fits, so the calls are restored to pass it.
    """
    state = PartialState()
    state.print(F"State: {state}")
    state.print("""testing gather""")
    test_gather(state)
    state.print("""testing gather_object""")
    test_gather_object(state)
    state.print("""testing broadcast""")
    test_broadcast(state)
    state.print("""testing pad_across_processes""")
    test_pad_across_processes(state)
    state.print("""testing reduce_sum""")
    test_reduce_sum(state)
    state.print("""testing reduce_mean""")
    test_reduce_mean(state)
if __name__ == "__main__":
    # Run all distributed-operation checks when executed directly.
    main()
| 306
|
from __future__ import annotations
def snake_case_ ( n: int ) -> list[int]:
    """Return the prime factorization of `n` in non-decreasing order.

    Trial division: divide out each factor i while i*i <= n; whatever remains
    above 1 is itself prime.

    NOTE(review): the mangled original named the parameter `lowerCAmelCase_`
    while the body read `n` (a NameError) and appended the parameter instead
    of the found factor; restored here.

    Examples: 12 -> [2, 2, 3]; 1 -> []; 13 -> [13].
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    # Remaining cofactor > 1 is prime.
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 306
| 1
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
# NOTE(review): this function's names are machine-mangled — the signature
# reuses `lowerCAmelCase_` for both parameters (a SyntaxError as written),
# `__lowercase` stands in for many distinct locals (prophet, prophet_old,
# loading_info, special_keys, mapping, model, old_model, is_key_init, ...),
# and `prophetnet_checkpoint_path` / `pytorch_dump_folder_path` below
# presumably refer to the two parameters. Code is left byte-identical;
# verify against the original conversion script before running.
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
    """Copy weights from an old-structure (XLM)ProphetNet checkpoint into the
    new model layout and save the result, walking each missing key's attribute
    path and mapping old attribute names to new ones."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        __lowercase : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
        __lowercase , __lowercase : int = XLMProphetNetForConditionalGeneration.from_pretrained(
            lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
    else:
        __lowercase : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
        __lowercase , __lowercase : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
            lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
    # Attention projections stored fused as in_proj_weight in the old model.
    __lowercase : List[str] = ["""key_proj""", """value_proj""", """query_proj"""]
    # Mapping from new attribute names to their old-model counterparts.
    __lowercase : Optional[int] = {
        """self_attn""": """ngram_self_attn""",
        """cross_attn""": """encoder_attn""",
        """cross_attn_layer_norm""": """encoder_attn_layer_norm""",
        """feed_forward_layer_norm""": """final_layer_norm""",
        """feed_forward""": """""",
        """intermediate""": """fc1""",
        """output""": """fc2""",
        """key_proj""": """k_proj""",
        """query_proj""": """q_proj""",
        """value_proj""": """v_proj""",
        """word_embeddings""": """embed_tokens""",
        """embeddings_layer_norm""": """emb_layer_norm""",
        """relative_pos_embeddings""": """relative_linear""",
        """ngram_embeddings""": """ngram_input_embed""",
        """position_embeddings""": """embed_positions""",
    }
    for key in loading_info["missing_keys"]:
        __lowercase : Tuple = key.split(""".""" )
        # lm_head lives outside the base prophetnet module.
        if attributes[0] == "lm_head":
            __lowercase : str = prophet
            __lowercase : List[str] = prophet_old
        else:
            __lowercase : Tuple = prophet.prophetnet
            __lowercase : Union[str, Any] = prophet_old.model
        __lowercase : Optional[Any] = False
        for attribute in attributes:
            if attribute in mapping:
                __lowercase : Optional[int] = mapping[attribute]
            if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0:
                __lowercase : str = attribute
            elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
                __lowercase : List[Any] = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                __lowercase : Any = old_model.weight
                logger.info(F"{attribute} is initialized." )
                __lowercase : Any = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                __lowercase : Dict = old_model.bias
                logger.info(F"{attribute} is initialized" )
                __lowercase : int = True
                break
            elif attribute in special_keys and hasattr(lowerCAmelCase_ , """in_proj_weight""" ):
                # Old model fuses q/k/v into one in_proj tensor of 3*embed_dim rows.
                __lowercase : Dict = old_model.in_proj_weight.shape[0] // 3
                __lowercase : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                # NOTE(review): the next two comparisons are no-op expressions —
                # they look like they were meant to be `assert` statements.
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    __lowercase : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    __lowercase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    __lowercase : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    __lowercase : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    __lowercase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    __lowercase : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                __lowercase : int = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                __lowercase : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                __lowercase : int = True
                break
            # Descend one level along the attribute path in both models.
            if attribute.isdigit():
                __lowercase : Tuple = model[int(lowerCAmelCase_ )]
                __lowercase : int = old_model[int(lowerCAmelCase_ )]
            else:
                __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                if old_attribute == "":
                    __lowercase : int = old_model
                else:
                    if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
                        raise ValueError(F"{old_model} does not have {old_attribute}" )
                    __lowercase : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
        if not is_key_init:
            raise ValueError(F"{key} was not correctly initialized!" )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    prophet.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    # CLI wrapper: parse checkpoint/output paths and run the conversion.
    lowerCamelCase : List[str] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    lowerCamelCase : Any = parser.parse_args()
    # NOTE(review): `convert_prophetnet_checkpoint_to_pytorch` is presumably the
    # intended name of the conversion function above (mangled to `snake_case_`).
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    """Integration test: run TF CamemBERT base and compare a slice of its output embeddings."""

    @slow
    def test_output_embeds_base_model(self) -> None:
        """Check output shape and a 3x3 slice of the last hidden state against reference values."""
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
| 306
| 1
|
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowerCAmelCase ( unittest.TestCase ):
    """Tests for ClapProcessor: save/load round-trips and delegation to the tokenizer/feature extractor."""

    def setUp(self) -> None:
        # Checkpoint providing both a Roberta tokenizer and a Clap feature extractor.
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        """Return a slow Roberta tokenizer loaded from the test checkpoint."""
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        """Return a Clap feature extractor loaded from the test checkpoint."""
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self) -> None:
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        """Saving then reloading a processor preserves the tokenizer vocab and extractor config."""
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        # from_pretrained returns the fast tokenizer variant by default.
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed at load time override the saved tokenizer/extractor settings."""
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        """Audio input through the processor matches the bare feature extractor output."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)

    def test_tokenizer(self):
        """Text input through the processor matches the bare tokenizer output."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        """batch_decode is forwarded to the tokenizer."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """The processor exposes the feature extractor's input names after the two tokenizer ones."""
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
| 306
|
def snake_case_ ( a : str , b : str ) -> bool:
    """Return True if ``a`` can be turned into ``b`` by upper-casing some of its
    lowercase letters and deleting all the remaining lowercase letters
    (the classic "abbreviation" DP problem).

    dp[i][j] is True when the first i chars of ``a`` can produce the first j chars of ``b``.
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # empty prefix of a produces empty prefix of b
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Delete a[i] (only lowercase letters may be deleted).
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
# Run any doctest examples in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 306
| 1
|
def snake_case_ ( number : int ) -> bool:
    """Return True if ``number`` is even, using a bitwise test of the lowest bit."""
    return number & 1 == 0
# Run any doctest examples in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 306
|
from scipy.stats import spearmanr
import datasets
lowerCamelCase : List[str] = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCamelCase : List[str] = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCamelCase : Union[str, Any] = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
    """`datasets` metric wrapping `scipy.stats.spearmanr`."""

    def _info(self):
        """Declare the metric metadata and the expected input features."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""float"""),
                    """references""": datasets.Value("""float"""),
                }
            ),
            reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return {'spearmanr': rho} and, if requested, the p-value as 'spearmanr_pvalue'."""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 306
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
# Module-level constants referenced by the tokenizer class below.
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class lowerCAmelCase ( PreTrainedTokenizer ):
    """
    MBart-50 tokenizer backed by a SentencePiece model.

    A language-code token (see `FAIRSEQ_LANGUAGE_CODES`) is prepended and `</s>`
    appended to every sequence, mimicking the original fairseq setup.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """
        Args:
            vocab_file: Path to the SentencePiece model file.
            src_lang / tgt_lang: Default source/target language codes (e.g. "en_XX").
            sp_model_kwargs: Extra kwargs forwarded to `SentencePieceProcessor`.
        """
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Make sure every fairseq language code is registered as an additional special token.
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        """Full vocabulary size (spm pieces + language codes + fairseq offset + mask)."""
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        """Currently active source language code."""
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        # SentencePieceProcessor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        """Return the token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Convert an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Convert a sequence of tokens back to a single de-tokenized string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build `[src_lang_code] X [eos]`; pairs are simply concatenated."""
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Tokenize `raw_inputs` and set `forced_bos_token_id` to the target-language id."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        """Set the language pair, then defer to the base implementation."""
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset special tokens to `[src_lang_code] ... [eos]`."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset special tokens to `[tgt_lang_code] ... [eos]`."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
| 306
|
from __future__ import annotations
def kmp ( pattern : str , text : str ) -> bool:
    """Return True if `pattern` occurs in `text`, using Knuth-Morris-Pratt search.

    Runs in O(len(text) + len(pattern)); never re-examines text characters.
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array ( pattern : str ) -> list[int]:
    """Build the KMP failure array: failure[k] is the length of the longest proper
    prefix of pattern[:k+1] that is also a suffix of it.
    """
    failure = [0]
    i = 0  # length of the current matched prefix
    j = 1  # position being examined
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            # fall back to the next shorter candidate prefix
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Self-tests for kmp() and get_failure_array(), run only as a script.
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 306
| 1
|
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase ( unittest.TestCase ):
    """Pipeline tests for video classification."""

    # Mapping used by the common pipeline test harness to pick candidate models.
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a top-2 video-classification pipeline plus example inputs (local file + URL)."""
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        """Each example must yield a list of two {score, label} dicts (top_k=2)."""
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        """Run a tiny random VideoMAE model and pin its (deterministic) top-2 scores."""
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        # No TF port of VideoMAE available yet.
        pass
| 306
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger (HF logging wrapper).
logger = logging.get_logger(__name__)

# PIL is only needed for the return-type annotation / image objects.
if is_vision_available():
    import PIL
class lowerCAmelCase ( BaseImageProcessor ):
    """CLIP image processor: optional RGB conversion, resize (shortest edge),
    center-crop, rescale and normalize, producing a `pixel_values` batch.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Resize targets the shortest edge (not a square), crop is an exact HxW box.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals `size["shortest_edge"]`, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the configured pipeline to one image or a list; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 306
| 1
|
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class lowerCAmelCase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    """Unit tests for ``VQModel``.

    NOTE(review): the obfuscated original derived from the undefined name
    ``__a``, bound both class attributes to the same name ``_A`` (the second
    silently overwrote the first), and read locals (``model``, ``loading_info``,
    ``image``) that were never assigned.  Base classes are restored from the
    module imports; attribute and method names from the mixin conventions and
    the grounded ``self.dummy_input`` call site.
    """

    # Attributes read by ModelTesterMixin.
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        """Return a random ``sample`` batch of shape (4, 3, *sizes) on the test device."""
        batch_size = 4
        num_channels = 3
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        # (channels, height, width) consumed by the model.
        return (3, 32, 32)

    @property
    def output_shape(self):
        # (channels, height, width) produced by the model.
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        """Return (constructor kwargs, forward inputs) for the common model tests."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        # Not applicable to VQModel; intentionally a no-op.
        pass

    def test_training(self):
        # Training is not exercised for VQModel; intentionally a no-op.
        pass

    def test_from_pretrained_hub(self):
        """Load the dummy hub checkpoint and check it runs without missing keys."""
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        """Check the pretrained model reproduces a pinned output slice."""
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()
        # Seed both CPU and CUDA RNGs so the random input is reproducible.
        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)
        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1E-3))
| 306
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def snake_case_ ( s : str , old : str , new : str , occurrence : int ):
    """Replace the last ``occurrence`` occurrences of ``old`` in ``s`` with ``new``.

    NOTE(review): the obfuscated original declared all four parameters with
    the same name (a SyntaxError) and returned through the undefined name
    ``new``; parameter names restored from the body and the call sites below.
    """
    # rsplit cuts at the last `occurrence` matches; joining with `new` replaces them.
    parts = s.rsplit(old, occurrence)
    return new.join(parts)
def snake_case_ ( state_dict ):
    """Sum all parameter values in ``state_dict`` as float.

    Keys containing ``encoder.embeddings`` are skipped because those tensors
    are double copied in original FLAVA and would be counted twice.

    NOTE(review): the obfuscated original never used its parameter and read
    the undefined name ``state_dict``; the parameter name is restored.
    """
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items())
def snake_case_ ( state_dict ):
    """Rename DALL-E codebook state-dict keys to the FLAVA layout.

    Transformations applied to every key:
    ``group_N.*`` -> ``group_N.group.*``, ``res_path.`` -> ``res_path.path.``,
    and a trailing ``.w``/``.b`` becomes ``.weight``/``.bias``.
    Every tensor value is cast to float32.

    NOTE(review): the obfuscated original discarded every renamed key into
    ``__lowercase`` and returned the undefined name ``upgrade``; the
    accumulator dict and key rebinding are restored.  The trailing-suffix
    rename is done inline (``rsplit``/``join``) instead of calling the
    undefined helper ``rreplace``.
    """
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F"{group_key}." , F"{group_key}.group." )
        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )
        if key.endswith(""".w""" ):
            # Replace only the final ".w"; rsplit keeps earlier matches intact.
            key = ".weight".join(key.rsplit(".w", 1))
        if key.endswith(""".b""" ):
            key = ".bias".join(key.rsplit(".b", 1))
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def snake_case_ ( checkpoint_path , pytorch_dump_folder_path , config_path=None , save_checkpoint=True ):
    """Convert a DALL-E codebook checkpoint into a ``FlavaImageCodebook``.

    Loads the original ``dall_e.Encoder`` weights (from disk or a URL),
    renames them to the HF layout, verifies the parameter sums match, then
    either saves the HF model or returns its state dict.

    NOTE(review): the obfuscated original discarded every intermediate into
    ``__lowercase`` and then read undefined names (``encoder``, ``ckpt``,
    ``hf_model`` ...); locals are restored from those reads.  The helper
    calls ``upgrade_state_dict``/``count_parameters`` refer to the sibling
    converters above, which the obfuscator renamed — confirm they resolve.
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    # A checkpoint may contain a pickled Encoder module instead of a state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    # Sanity check: total parameter mass must be preserved by the conversion.
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    # Command-line entry point for the DALL-E -> FLAVA codebook conversion.
    # NOTE(review): the obfuscated original bound the parser and the parsed
    # args to ``lowerCamelCase`` but read ``parser``/``args``; names restored.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
    # NOTE(review): the converter above was renamed by the obfuscator; this call
    # keeps the original script's public name — confirm it resolves.
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 306
| 1
|
def snake_case_ ( lst : list ):
    """Sort ``lst`` in place using gnome sort and return it.

    Walks the list, swapping adjacent out-of-order pairs and stepping back
    after each swap; O(n^2) worst case, O(n) on already-sorted input.

    NOTE(review): the obfuscated original never bound ``i`` or the swap
    results; locals restored from the reads in the body.
    """
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            # Never step before the first pair.
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(snake_case_(unsorted))
| 306
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
    """Convert an old-layout (XLM-)ProphetNet checkpoint to the new layout.

    NOTE(review): machine obfuscation renamed both parameters to the same
    name (a SyntaxError as written) and rebound most locals to ``__lowercase``
    while later lines still read the original names (``prophet``,
    ``loading_info``, ``mapping`` ...).  Code is left byte-identical here;
    only comments were added.  Intended signature appears to be
    ``(prophetnet_checkpoint_path, pytorch_dump_folder_path)`` — confirm
    against the upstream transformers conversion script.
    """
    # Load old- and new-layout models; output_loading_info=True also returns
    # the dict of keys the new model could not match automatically.
    if "xprophetnet" in prophetnet_checkpoint_path:
        __lowercase : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
        __lowercase , __lowercase : int = XLMProphetNetForConditionalGeneration.from_pretrained(
            lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
    else:
        __lowercase : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
        __lowercase , __lowercase : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
            lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
    # Attention projections that are fused into in_proj_weight in the old model.
    __lowercase : List[str] = ["""key_proj""", """value_proj""", """query_proj"""]
    # new-layout attribute name -> old-layout attribute name.
    __lowercase : Optional[int] = {
        """self_attn""": """ngram_self_attn""",
        """cross_attn""": """encoder_attn""",
        """cross_attn_layer_norm""": """encoder_attn_layer_norm""",
        """feed_forward_layer_norm""": """final_layer_norm""",
        """feed_forward""": """""",
        """intermediate""": """fc1""",
        """output""": """fc2""",
        """key_proj""": """k_proj""",
        """query_proj""": """q_proj""",
        """value_proj""": """v_proj""",
        """word_embeddings""": """embed_tokens""",
        """embeddings_layer_norm""": """emb_layer_norm""",
        """relative_pos_embeddings""": """relative_linear""",
        """ngram_embeddings""": """ngram_input_embed""",
        """position_embeddings""": """embed_positions""",
    }
    # Manually copy every key the new model failed to load.
    for key in loading_info["missing_keys"]:
        __lowercase : Tuple = key.split(""".""" )
        # Pick the right root module on each side (lm_head lives outside .prophetnet).
        if attributes[0] == "lm_head":
            __lowercase : str = prophet
            __lowercase : List[str] = prophet_old
        else:
            __lowercase : Tuple = prophet.prophetnet
            __lowercase : Union[str, Any] = prophet_old.model
        __lowercase : Optional[Any] = False
        for attribute in attributes:
            # Translate the attribute name into the old model's vocabulary.
            if attribute in mapping:
                __lowercase : Optional[int] = mapping[attribute]
            if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0:
                __lowercase : str = attribute
            elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
                __lowercase : List[Any] = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                __lowercase : Any = old_model.weight
                logger.info(F"{attribute} is initialized." )
                __lowercase : Any = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                __lowercase : Dict = old_model.bias
                logger.info(F"{attribute} is initialized" )
                __lowercase : int = True
                break
            # Fused q/k/v projection: slice the right third out of in_proj_*.
            elif attribute in special_keys and hasattr(lowerCAmelCase_ , """in_proj_weight""" ):
                __lowercase : Dict = old_model.in_proj_weight.shape[0] // 3
                __lowercase : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                # NOTE(review): the next two lines are bare comparisons — the
                # ``assert`` keyword was lost, so they are no-ops as written.
                param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    __lowercase : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    __lowercase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    __lowercase : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    __lowercase : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    __lowercase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    __lowercase : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                __lowercase : int = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                __lowercase : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                __lowercase : int = True
                break
            # Descend one level on both sides (list index or named child).
            if attribute.isdigit():
                __lowercase : Tuple = model[int(lowerCAmelCase_ )]
                __lowercase : int = old_model[int(lowerCAmelCase_ )]
            else:
                __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                if old_attribute == "":
                    __lowercase : int = old_model
                else:
                    if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
                        raise ValueError(F"{old_model} does not have {old_attribute}" )
                    __lowercase : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
        if not is_key_init:
            raise ValueError(F"{key} was not correctly initialized!" )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    prophet.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    # Command-line entry point for the ProphetNet checkpoint conversion.
    # NOTE(review): the obfuscated original bound the parser and the parsed
    # args to ``lowerCamelCase`` but read ``parser``/``args``; names restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    # NOTE(review): the converter above was renamed by the obfuscator; this call
    # keeps the original script's public name — confirm it resolves.
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
| 1
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # Command-line options for converting an original ControlNet checkpoint.
    # NOTE(review): the obfuscated original bound the parser to
    # ``lowerCamelCase`` but every add_argument call read ``parser``; restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument(
        '''--original_config_file''',
        type=str,
        required=True,
        help='''The YAML config file corresponding to the original architecture.''',
    )
    parser.add_argument(
        '''--num_in_channels''',
        default=None,
        type=int,
        help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=5_12,
        type=int,
        help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
            ''' Base. Use 768 for Stable Diffusion v2.'''
        ),
    )
    parser.add_argument(
        '''--extract_ema''',
        action='''store_true''',
        help=(
            '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
            ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
            ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
        ),
    )
    parser.add_argument(
        '''--upcast_attention''',
        action='''store_true''',
        help=(
            '''Whether the attention computation should always be upcasted. This is necessary when running stable'''
            ''' diffusion 2.1.'''
        ),
    )
    parser.add_argument(
        '''--from_safetensors''',
        action='''store_true''',
        help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
    )
    parser.add_argument(
        '''--to_safetensors''',
        action='''store_true''',
        help='''Whether to store pipeline in safetensors format or not.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def snake_case_ ( string : str ):
    """Parse the literal strings "True"/"False" into booleans for argparse.

    Raises ValueError on anything else so argparse reports a usage error.

    NOTE(review): the obfuscated original never used its parameter and read
    the undefined name ``string``; the parameter name is restored.
    """
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(F"could not parse string as bool {string}" )
# Override flags whose types depend on the bool parser defined above.
# NOTE(review): ``type=snake_case_`` is the bool-parsing helper immediately
# above (the obfuscator renamed it away from ``parse_bool``).
parser.add_argument(
    '''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=snake_case_
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
# NOTE(review): the obfuscated original bound the parsed args and the model
# to ``lowerCamelCase`` but read ``args``/``controlnet``; names restored.
args = parser.parse_args()
controlnet = download_controlnet_from_original_ckpt(
    checkpoint_path=args.checkpoint_path,
    original_config_file=args.original_config_file,
    image_size=args.image_size,
    extract_ema=args.extract_ema,
    num_in_channels=args.num_in_channels,
    upcast_attention=args.upcast_attention,
    from_safetensors=args.from_safetensors,
    device=args.device,
    use_linear_projection=args.use_linear_projection,
    cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 306
|
def snake_case_ ( pence : int = 200 ) -> int:
    """Count the ways to make ``pence`` from standard UK coins (Project Euler 31).

    Classic bottom-up coin-change DP: for each coin, extend the table of
    "ways to reach amount i" so combinations are counted order-independently.

    NOTE(review): the obfuscated original never used its parameter and read
    undefined names (``pence``, ``coins``, ``number_of_ways``); restored.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    # NOTE(review): the obfuscator renamed ``solution`` to ``snake_case_``;
    # the self-check calls the def above under its current name.
    assert snake_case_(2_00) == 7_36_82
| 306
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for the Vivit configuration file.
# NOTE(review): the original annotated these with Optional[Any]/Dict, which
# are evaluated at module import time but never imported here (NameError);
# the annotations are dropped, bindings kept byte-identical otherwise.
lowerCamelCase = logging.get_logger(__name__)

# Canonical Vivit checkpoints and their hosted config files.
# NOTE(review): the obfuscator bound this to the same name as the logger
# above, silently overwriting it — confirm the intended names upstream.
lowerCamelCase = {
    '''google/vivit-b-16x2-kinetics400''': (
        '''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCAmelCase ( PretrainedConfig ):
    """Configuration for the Vivit (video Vision Transformer) model.

    NOTE(review): the obfuscated original derived from the undefined name
    ``__a`` and declared every ``__init__`` parameter as ``__a`` (a
    SyntaxError as written); parameter names were restored from the attribute
    assignments in the body and the base class from the module imports.
    """

    # Model-type identifier; the obfuscator renamed the attribute to ``_A`` —
    # confirm the intended attribute name (conventionally ``model_type``).
    _A = '''vivit'''

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],  # never mutated; list default kept for checkpoint compatibility
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-06,
        qkv_bias=True,
        **kwargs,
    ):
        """Store the hyper-parameters; remaining kwargs go to PretrainedConfig."""
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 306
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
    """Builds configs and inputs for the TimmBackbone tests.

    NOTE(review): obfuscation gave every method the same name (later defs
    shadowed earlier ones) and every ``__init__`` parameter the name ``__a``
    (a SyntaxError as written); names were restored from the attribute reads
    in the bodies and from the call sites in the test class below
    (``prepare_config_and_inputs_for_common`` etc.).
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Default to the last stage when no explicit indices are requested.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, random pixel_values batch)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a TimmBackboneConfig from the tester's settings."""
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Run a forward pass and check the last feature map's shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs dict) as expected by the common mixin tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
    '''Test suite for the TimmBackbone wrapper around timm models.

    NOTE(review): machine obfuscation collapsed every method name to
    ``lowerCAmelCase`` (so earlier defs are shadowed by later ones in the
    class namespace), bound all six class attributes to the same name ``_A``,
    and left the mixin base classes and many locals as undefined names
    (``__a``, ``self.model_tester``, ``model`` ...).  Code is left
    byte-identical; comments and docstrings only.
    '''
    # Intended attributes (all shadowed by the repeated ``_A`` binding):
    # all_model_classes, pipeline_model_mapping, and the mixin feature flags.
    _A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
    _A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    _A : List[Any] = False
    _A : List[str] = False
    _A : Any = False
    _A : Optional[Any] = False
    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Create the shared model tester and config tester (intended ``setUp``)."""
        __lowercase : str = TimmBackboneModelTester(self )
        __lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
    def lowerCAmelCase ( self : Any ) -> str:
        """Run the common config serialization/initialization checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def lowerCAmelCase ( self : str ) -> Tuple:
        """Check a timm-loaded backbone matches its transformers counterpart."""
        __lowercase : Tuple = """resnet18"""
        __lowercase : Optional[int] = """microsoft/resnet-18"""
        __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
        __lowercase : Dict = AutoBackbone.from_pretrained(__a )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
        __lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
    def lowerCAmelCase ( self : List[Any] ) -> Any:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
    def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
    def lowerCAmelCase ( self : List[Any] ) -> str:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : Union[str, Any] ) -> int:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def lowerCAmelCase ( self : Dict ) -> int:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : List[str] ) -> List[Any]:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : List[Any] ) -> Tuple:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
    def lowerCAmelCase ( self : Dict ) -> Any:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
    def lowerCAmelCase ( self : str ) -> List[Any]:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""Safetensors is not supported by timm.""" )
    def lowerCAmelCase ( self : Any ) -> List[Any]:
        """Skipped; see decorator."""
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCAmelCase ( self : List[str] ) -> List[str]:
        """Skipped; see decorator."""
        pass
    def lowerCAmelCase ( self : Any ) -> List[str]:
        """Verify the forward signature's first positional arg is ``pixel_values``."""
        __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : Optional[Any] = model_class(__a )
            __lowercase : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase : List[str] = [*signature.parameters.keys()]
            __lowercase : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __a )
    def lowerCAmelCase ( self : Optional[Any] ) -> int:
        """Check gradients flow back to hidden states (and attentions if present)."""
        __lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase : Optional[Any] = True
        __lowercase : Union[str, Any] = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        __lowercase : Union[str, Any] = self.all_model_classes[0]
        __lowercase : List[Any] = model_class(__a )
        model.to(__a )
        __lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
        __lowercase : Union[str, Any] = model(**__a )
        __lowercase : Optional[int] = outputs[0][-1]
        # Encoder-/Decoder-only models
        __lowercase : Any = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            __lowercase : Optional[int] = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=__a )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """Check feature maps/channels follow out_indices, defaults, and fresh init."""
        __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : List[str] = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : int = model(**__a )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            __lowercase : Any = copy.deepcopy(__a )
            __lowercase : Dict = None
            __lowercase : Tuple = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : Optional[int] = model(**__a )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            __lowercase : List[str] = copy.deepcopy(__a )
            __lowercase : Optional[Any] = False
            __lowercase : str = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : List[Any] = model(**__a )
| 306
| 1
|
def snake_case_ ( x_points : list , y_points : list , xa : int ):
    """Interpolate the value at ``xa`` with Neville's iterated interpolation.

    Builds the Neville tableau ``q`` where column 1 holds the sample values
    and each later column combines two neighbouring estimates.

    Returns ``[interpolated_value, q]`` (the full tableau is useful for
    inspecting convergence).

    NOTE(review): the obfuscated original dropped the tableau writes into
    ``__lowercase`` and read undefined names (``n``, ``q``); restored.
    """
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 306
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# NOTE(review): downstream code in this file reads these bindings as
# ``logger``, ``MAPPING`` and ``TOP_LEVEL_KEYS``; the obfuscator had bound
# them to ``lowerCamelCase`` — names restored to match the consumers.
logger = logging.get_logger(__name__)

# fairseq parameter-path fragment -> HF Wav2Vec2Conformer parameter path.
# "*" is replaced with the encoder layer index during conversion.
MAPPING = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
    '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
    '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
    '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
    '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
    '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
    '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
    '''self_attn.rotary_emb''': '''encoder.embed_positions''',
    '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
    '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
    '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
    '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
    '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
    '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
    '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
    '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
    '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
    '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
    '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
    '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}

# Keys that live at the top level of the HF model (no "wav2vec2_conformer." prefix).
TOP_LEVEL_KEYS = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def snake_case_ ( hf_pointer , key , value , full_name , weight_type ):
    """Copy ``value`` into the HF module/parameter addressed by dotted ``key``.

    ``weight_type`` selects which leaf attribute of the target module receives
    the tensor (``weight``, ``bias``, batch-norm buffers, ...); ``None`` means
    the resolved object itself is a parameter.  ``full_name`` is the original
    fairseq name, used only for error/log messages.

    Raises ValueError when the checkpoint tensor's shape does not match the
    target parameter's shape.

    NOTE(review): the obfuscated original discarded the getattr walk and all
    tensor assignments into ``__lowercase``; restored from the reads and the
    error-message text.
    """
    # Walk the dotted path down from the root module.
    for attribute in key.split("""."""):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def snake_case_ ( fairseq_model , hf_model , is_headless ):
    """Copy every tensor from a fairseq Wav2Vec2-Conformer model into ``hf_model``.

    Conv feature-extractor weights go through ``load_conv_layer``; everything
    else is routed via the module-level ``MAPPING`` table and written with
    ``set_recursively``.  Unmatched fairseq keys are collected and logged.

    NOTE(review): locals restored from the obfuscated reads.  The original
    accessed ``hf_model.wavaveca_conformer`` while building string keys with
    the prefix "wav2vec2_conformer." two lines below — the attribute name was
    mangled by the obfuscator and is restored here.  The third parameter is
    unused in this body; its name is assumed — confirm upstream.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == """group""", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Splice the encoder layer index into the mapped path.
                        layer_index = name.split(key)[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index)
                    # Decide which leaf attribute of the target module to write.
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "inv_freq" in name:
                        weight_type = """inv_freq"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F"Unused weights: {unused_weights}" )
def snake_case_(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Route one fairseq conv-feature-extractor tensor into the HF feature extractor.

    NOTE(review): the original signature was destroyed by automated renaming
    (five duplicate ``lowerCAmelCase_`` parameters — a SyntaxError) and the
    ``... = value`` assignment targets were mangled; both are restored here from
    usage. Also fixes the bias-branch layer-norm log message, which wrongly said
    "weight".

    Args:
        full_name: full fairseq parameter name (contains ``conv_layers.``).
        value: tensor to copy.
        feature_extractor: HF feature extractor whose layers receive the tensor.
        unused_weights: list collecting names that could not be routed.
        use_group_norm: whether the config uses group norm (affects which
            layer-norm tensors are expected on layer 0).

    Raises:
        ValueError: if the incoming tensor shape does not match the target slot.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])  # which conv layer
    type_id = int(items[1])   # 0 = conv weight/bias, 2 = layer norm
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            # Fixed: this branch copies the layer-norm *bias*, not the weight.
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        # Anything else (or layer norms on the wrong layer) is reported as unused.
        unused_weights.append(full_name)
# Conversion entry point: load a fairseq wav2vec2-conformer checkpoint and re-save
# it in HuggingFace format (plus tokenizer/processor when the model is fine-tuned).
# NOTE(review): automated renaming collapsed every parameter to `lowerCAmelCase_`
# (duplicate parameter names are a SyntaxError in Python) and replaced most
# assignment targets with `__lowercase`, so the real targets (config attributes,
# `target_dict`, `vocab_dict`, `hf_wavavec`, ...) are lost. The hedged comments
# below reconstruct the presumed intent — restore from the upstream script before
# running this code.
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=True ):
    # Build the model config, either from a local config.json or from defaults.
    if config_path is not None:
        __lowercase : List[Any] = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase_ , hidden_act="""swish""" )
    else:
        __lowercase : List[Any] = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # presumably `config.position_embeddings_type = "rotary"` — TODO confirm
        __lowercase : Tuple = """rotary"""
    if is_finetuned:
        if dict_path:
            # presumably `target_dict = Dictionary.load(dict_path)` — TODO confirm
            __lowercase : Any = Dictionary.load(lowerCAmelCase_ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __lowercase : List[Any] = target_dict.pad_index
            __lowercase : Optional[int] = target_dict.bos_index
            __lowercase : List[Any] = target_dict.eos_index
            __lowercase : List[str] = len(target_dict.symbols )
            __lowercase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" )
            if not os.path.isdir(lowerCAmelCase_ ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) )
                return
            os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
            # presumably `vocab_dict = target_dict.indices` — TODO confirm
            __lowercase : Tuple = target_dict.indices
            # fairseq has the <pad> and <s> switched
            __lowercase : int = 0
            __lowercase : Any = 1
            with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
            # Build the CTC tokenizer from the dumped vocab.
            __lowercase : Dict = WavaVecaCTCTokenizer(
                lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , )
            # Attention mask is only returned when layer norm is used.
            __lowercase : List[Any] = True if config.feat_extract_norm == """layer""" else False
            __lowercase : Optional[Any] = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
            __lowercase : Optional[int] = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
            processor.save_pretrained(lowerCAmelCase_ )
        # presumably `hf_wavavec = WavaVecaConformerForCTC(config)` — TODO confirm
        __lowercase : Union[str, Any] = WavaVecaConformerForCTC(lowerCAmelCase_ )
    else:
        __lowercase : Optional[Any] = WavaVecaConformerForPreTraining(lowerCAmelCase_ )
    # Load the fairseq checkpoint (fine-tuned checkpoints need the dict directory).
    if is_finetuned:
        __lowercase , __lowercase , __lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        __lowercase : List[Any] = argparse.Namespace(task="""audio_pretraining""" )
        __lowercase : Optional[Any] = fairseq.tasks.setup_task(lowerCAmelCase_ )
        __lowercase , __lowercase , __lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ )
    __lowercase : Dict = model[0].eval()
    # Copy all fairseq weights into the HF model, then persist it.
    recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned )
    hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    # Command-line interface for the conversion script.
    # NOTE(review): both assignment targets below were mangled to `lowerCamelCase`
    # by automated renaming; the following lines expect them to be named `parser`
    # and `args` respectively — restore before running.
    lowerCamelCase : int = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    lowerCamelCase : Any = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 306
| 1
|
def snake_case_(number: int) -> int:
    """Return the number of set bits (1-bits) in a non-negative integer.

    Uses Brian Kernighan's trick: ``n &= n - 1`` clears the lowest set bit,
    so the loop runs once per 1-bit instead of once per bit position.

    Fixed: the type guard was ``isinstance(number, number)`` (the second
    argument had been collapsed onto the first by automated renaming), which
    raised ``TypeError`` for every integer input; it now checks against ``int``.

    Args:
        number: non-negative integer to count bits of.

    Returns:
        The population count of ``number``.

    Raises:
        ValueError: if ``number`` is not an ``int`` or is negative.
    """
    if not isinstance(number, int) or number < 0:
        raise ValueError("""Input must be a non-negative integer""" )
    count = 0
    while number:
        number &= number - 1
        count += 1
    return count
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 306
|
def snake_case_(string_a: str, string_b: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    Fixed: automated renaming had collapsed both parameters to one name (a
    SyntaxError) and both loop variables to ``chara``, so the length check
    compared a string to itself and the mismatch test ``chara != chara`` was
    always false — the function could only ever return 0.

    Args:
        string_a: first string.
        string_b: second string; must have the same length as ``string_a``.

    Returns:
        Number of positions at which the two strings differ.

    Raises:
        ValueError: if the strings have different lengths.
    """
    if len(string_a) != len(string_b):
        raise ValueError("""String lengths must match!""" )
    # Each mismatching position contributes True == 1 to the sum.
    return sum(char_a != char_b for char_a, char_b in zip(string_a, string_b))
if __name__ == "__main__":
    # Run the doctest examples embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 306
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Force deterministic torch/CUDA behaviour so pipeline outputs are reproducible.
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase ):
    '''Fast smoke tests for ``AltDiffusionImgaImgPipeline`` built from tiny dummy components.

    NOTE(review): automated renaming gave every method the same name
    (``lowerCAmelCase``) — at runtime only the last ``def`` binds — and replaced
    local assignment targets with ``__lowercase``, so names such as
    ``batch_size``, ``image``, ``alt_pipe`` and ``prompt`` are referenced but
    never assigned, and ``__a`` stands in for several different originals
    (device, keyword values, ...). Restore the original identifiers before
    executing these tests.
    '''
    def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        """Release Python and CUDA memory after each test (originally ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def lowerCAmelCase ( self : Any ) -> List[str]:
        """Deterministic random 1x3x32x32 image tensor (originally ``dummy_image``)."""
        __lowercase : Any = 1
        __lowercase : Dict = 3
        __lowercase : Optional[int] = (32, 32)
        __lowercase : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
        return image
    @property
    def lowerCAmelCase ( self : Tuple ) -> str:
        """Tiny seeded conditional UNet (originally ``dummy_cond_unet``)."""
        torch.manual_seed(0 )
        __lowercase : Union[str, Any] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
        return model
    @property
    def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        """Tiny seeded ``AutoencoderKL`` (originally ``dummy_vae``)."""
        torch.manual_seed(0 )
        __lowercase : Dict = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        return model
    @property
    def lowerCAmelCase ( self : Any ) -> Optional[int]:
        """Tiny seeded Roberta-series text encoder (originally ``dummy_text_encoder``)."""
        torch.manual_seed(0 )
        __lowercase : Union[str, Any] = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(__a )
    @property
    def lowerCAmelCase ( self : str ) -> Union[str, Any]:
        """Stub feature extractor: callable returning an object with empty ``pixel_values`` (originally ``dummy_extractor``)."""
        def extract(*__a : Union[str, Any] , **__a : List[str] ):
            class lowerCAmelCase :
                '''Minimal stand-in for a feature-extractor output (originally ``Out``).'''
                def __init__( self : Tuple ) -> List[str]:
                    """Start with an empty ``pixel_values`` tensor."""
                    __lowercase : str = torch.ones([0] )
                def lowerCAmelCase ( self : List[str] , __a : List[Any] ) -> Optional[Any]:
                    """Move ``pixel_values`` to the given device and return self (originally ``to``)."""
                    self.pixel_values.to(__a )
                    return self
            return Out()
        return extract
    def lowerCAmelCase ( self : List[Any] ) -> Dict:
        """End-to-end CPU smoke test: dict and tuple pipeline outputs must match a reference slice."""
        __lowercase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
        __lowercase : int = self.dummy_cond_unet
        __lowercase : Dict = PNDMScheduler(skip_prk_steps=__a )
        __lowercase : List[Any] = self.dummy_vae
        __lowercase : Optional[Any] = self.dummy_text_encoder
        __lowercase : Optional[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
        __lowercase : str = 77
        __lowercase : int = self.dummy_image.to(__a )
        __lowercase : List[str] = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        __lowercase : Optional[int] = AltDiffusionImgaImgPipeline(
            unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
        __lowercase : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
        __lowercase : str = alt_pipe.to(__a )
        alt_pipe.set_progress_bar_config(disable=__a )
        __lowercase : str = """A painting of a squirrel eating a burger"""
        __lowercase : Optional[int] = torch.Generator(device=__a ).manual_seed(0 )
        __lowercase : List[str] = alt_pipe(
            [prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__a , )
        __lowercase : Dict = output.images
        __lowercase : Tuple = torch.Generator(device=__a ).manual_seed(0 )
        # Same seed, return_dict=False: must produce the same image as above.
        __lowercase : str = alt_pipe(
            [prompt] , generator=__a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=__a , return_dict=__a , )[0]
        __lowercase : Dict = image[0, -3:, -3:, -1]
        __lowercase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowercase : Any = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def lowerCAmelCase ( self : List[str] ) -> Tuple:
        """GPU fp16 smoke test: the half-precision pipeline must produce an image of the right shape."""
        __lowercase : Optional[int] = self.dummy_cond_unet
        __lowercase : List[Any] = PNDMScheduler(skip_prk_steps=__a )
        __lowercase : Union[str, Any] = self.dummy_vae
        __lowercase : Dict = self.dummy_text_encoder
        __lowercase : Optional[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
        __lowercase : Union[str, Any] = 77
        __lowercase : Any = self.dummy_image.to(__a )
        # put models in fp16
        __lowercase : Tuple = unet.half()
        __lowercase : List[Any] = vae.half()
        __lowercase : Optional[Any] = bert.half()
        # make sure here that pndm scheduler skips prk
        __lowercase : Dict = AltDiffusionImgaImgPipeline(
            unet=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , safety_checker=__a , feature_extractor=self.dummy_extractor , )
        __lowercase : int = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__a )
        __lowercase : List[Any] = alt_pipe.to(__a )
        alt_pipe.set_progress_bar_config(disable=__a )
        __lowercase : Optional[int] = """A painting of a squirrel eating a burger"""
        __lowercase : Any = torch.manual_seed(0 )
        __lowercase : Union[str, Any] = alt_pipe(
            [prompt] , generator=__a , num_inference_steps=2 , output_type="""np""" , image=__a , ).images
        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def lowerCAmelCase ( self : Tuple ) -> Any:
        """GPU test with an input resolution divisible by 8 but not 16/32, checked against a reference slice."""
        __lowercase : Union[str, Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        __lowercase : Dict = init_image.resize((760, 504) )
        __lowercase : Tuple = """BAAI/AltDiffusion"""
        __lowercase : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
            __a , safety_checker=__a , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        __lowercase : List[str] = """A fantasy landscape, trending on artstation"""
        __lowercase : Union[str, Any] = torch.manual_seed(0 )
        __lowercase : Any = pipe(
            prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type="""np""" , )
        __lowercase : List[Any] = output.images[0]
        __lowercase : Tuple = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        __lowercase : Optional[Any] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    '''Slow GPU integration tests for ``AltDiffusionImgaImgPipeline`` against a reference image.

    NOTE(review): automated renaming gave both methods the same name
    (``lowerCAmelCase``) and mangled the local assignment targets to
    ``__lowercase``; names such as ``init_image``, ``pipe`` and ``prompt`` are
    referenced but never assigned. Restore the original identifiers before
    executing.
    '''
    def lowerCAmelCase ( self : str ) -> List[Any]:
        """Release Python and CUDA memory after each test (originally ``tearDown``)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCAmelCase ( self : int ) -> Tuple:
        """Full img2img run on the 'fantasy landscape' prompt, compared to a stored numpy reference."""
        __lowercase : str = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/img2img/sketch-mountains-input.jpg""" )
        __lowercase : List[Any] = init_image.resize((768, 512) )
        __lowercase : Tuple = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
        __lowercase : Union[str, Any] = """BAAI/AltDiffusion"""
        __lowercase : Union[str, Any] = AltDiffusionImgaImgPipeline.from_pretrained(
            __a , safety_checker=__a , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        __lowercase : Dict = """A fantasy landscape, trending on artstation"""
        __lowercase : Dict = torch.manual_seed(0 )
        __lowercase : Any = pipe(
            prompt=__a , image=__a , strength=0.75 , guidance_scale=7.5 , generator=__a , output_type="""np""" , )
        __lowercase : Union[str, Any] = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
| 306
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case_(x):
    """Return ``x`` unchanged if it is already an iterable, otherwise the pair ``(x, x)``.

    Fixed: automated renaming left the parameter as ``lowerCAmelCase_`` while the
    body returned an undefined ``x``, raising ``NameError`` for every iterable
    input; the parameter name is restored so both branches use the same variable.

    Note: strings are iterable, so a string is returned as-is, not duplicated.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class lowerCAmelCase :
    '''Shared mixin of checks for Flax vision-text dual-encoder models (ViT/CLIP + BERT).

    Concrete subclasses provide the model/input factories; this class provides
    save/load, attention-output and PyTorch<->Flax equivalence checks.

    NOTE(review): automated renaming gave every method the same name
    (``lowerCAmelCase``) and several signatures duplicate the parameter ``__a``
    (a SyntaxError); local assignment targets were mangled to ``__lowercase``,
    so names like ``vision_model``, ``fx_outputs`` and ``config_inputs_dict``
    are referenced but never assigned. Restore the original identifiers before
    executing.
    '''
    def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
        """Subclass hook (intentionally empty); presumably builds the vision/text submodels."""
        pass
    def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Subclass hook (intentionally empty); presumably prepares configs and inputs."""
        pass
    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """Subclass hook (intentionally empty); presumably returns a pretrained model plus inputs."""
        pass
    def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]:
        """Assert the max absolute difference between two arrays is within ``tol`` (originally ``assert_almost_equals``)."""
        __lowercase : List[str] = np.abs((a - b) ).max()
        self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." )
    def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
        """Build the dual encoder from configs and check the embedding output shapes."""
        __lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : str = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
        """Build the dual encoder from pretrained submodels and check the embedding output shapes."""
        __lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
        __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
        """Round-trip save_pretrained/from_pretrained and check outputs stay within 1e-3."""
        __lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
        __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        __lowercase : int = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(__a )
            __lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
            __lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
            __lowercase : int = after_output[0]
            __lowercase : int = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(__a , 1E-3 )
    def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
        """Check the shapes of vision and text attention outputs when output_attentions is on."""
        __lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
        __lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : Union[str, Any] = model(
            input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
        __lowercase : Optional[int] = output.vision_model_output.attentions
        self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        __lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
        __lowercase : List[str] = to_atuple(vision_model.config.patch_size )
        __lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __lowercase : int = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        __lowercase : Dict = output.text_model_output.attentions
        self.assertEqual(len(__a ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
        """Core PT<->Flax equivalence check: direct outputs plus both save/load directions within 4e-2."""
        pt_model.to(__a )
        pt_model.eval()
        # prepare inputs
        __lowercase : Union[str, Any] = inputs_dict
        __lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            __lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
            __lowercase : Tuple = fx_model(**__a ).to_tuple()
            self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
            for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
                self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
            # PT -> Flax
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_model.save_pretrained(__a )
                __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
            __lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
            self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
                self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
            # Flax -> PT
            with tempfile.TemporaryDirectory() as tmpdirname:
                fx_model.save_pretrained(__a )
                __lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
            pt_model_loaded.to(__a )
            pt_model_loaded.eval()
            with torch.no_grad():
                __lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
            self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
            for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
                self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
    def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
        """Convert a PT state dict to Flax and run the equivalence check."""
        __lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : str = VisionTextDualEncoderModel(__a )
        __lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
        __lowercase : Any = fx_state
        self.check_pt_flax_equivalence(__a , __a , __a )
    def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
        """Load Flax weights into a PT model and run the equivalence check."""
        __lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
        __lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
        self.check_pt_flax_equivalence(__a , __a , __a )
    def lowerCAmelCase ( self : str ) -> Optional[Any]:
        """Test: model built from configs produces correctly shaped embeddings."""
        __lowercase : Optional[Any] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**__a )
    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """Test: model built from pretrained submodels produces correctly shaped embeddings."""
        __lowercase : int = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**__a )
    def lowerCAmelCase ( self : List[Any] ) -> Dict:
        """Test: outputs survive a save/load round-trip."""
        __lowercase : List[str] = self.prepare_config_and_inputs()
        self.check_save_load(**__a )
    def lowerCAmelCase ( self : Any ) -> Dict:
        """Test: attention outputs have the expected shapes."""
        __lowercase : str = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**__a )
    @is_pt_flax_cross_test
    def lowerCAmelCase ( self : List[str] ) -> Tuple:
        """Cross-framework test: PT->Flax and Flax->PT conversions give equivalent outputs."""
        __lowercase : Optional[Any] = self.prepare_config_and_inputs()
        __lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
        __lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
        __lowercase : Dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(__a , __a , __a )
        self.check_equivalence_flax_to_pt(__a , __a , __a )
    @slow
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Slow test: a real pretrained model's outputs survive save/load within 1e-5."""
        __lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
        __lowercase : Dict = model_a(**__a )
        __lowercase : Any = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(__a )
            __lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
            __lowercase : Optional[int] = model_a(**__a )
            __lowercase : Tuple = after_outputs[0]
            __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(__a , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
    '''Concrete dual-encoder test case pairing a Flax ViT vision tower with a Flax BERT text tower.

    NOTE(review): the first base class was mangled to ``__a`` by automated
    renaming — presumably the mixin defined above; likewise every method shares
    the name ``lowerCAmelCase`` and local assignment / tuple-unpack targets were
    collapsed to ``__lowercase``. Restore original identifiers before executing.
    '''
    def lowerCAmelCase ( self : Dict ) -> Dict:
        """Build a pretrained tiny ViT+BERT dual encoder plus random pixel/text inputs (originally ``get_pretrained_model_and_inputs``)."""
        __lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
        __lowercase : int = 13
        __lowercase : Union[str, Any] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        __lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        __lowercase : Tuple = random_attention_mask([batch_size, 4] )
        __lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict:
        """Instantiate the ViT vision model and BERT text model from configs (originally ``get_vision_text_model``)."""
        __lowercase : int = FlaxViTModel(__a )
        __lowercase : List[Any] = FlaxBertModel(__a )
        return vision_model, text_model
    def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
        """Assemble configs and inputs from the ViT and BERT model testers (originally ``prepare_config_and_inputs``)."""
        __lowercase : Tuple = FlaxViTModelTester(self )
        __lowercase : str = FlaxBertModelTester(self )
        __lowercase : List[str] = vit_model_tester.prepare_config_and_inputs()
        __lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
        __lowercase , __lowercase : Optional[int] = vision_config_and_inputs
        __lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
    '''Concrete dual-encoder test case pairing a Flax CLIP vision tower with a Flax BERT text tower.

    NOTE(review): the first base class was mangled to ``__a`` by automated
    renaming — presumably the mixin defined above; likewise every method shares
    the name ``lowerCAmelCase`` and local assignment / tuple-unpack targets were
    collapsed to ``__lowercase``. Restore original identifiers before executing.
    '''
    def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Build a pretrained tiny CLIP+BERT dual encoder plus random pixel/text inputs (originally ``get_pretrained_model_and_inputs``)."""
        __lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
        __lowercase : Tuple = 13
        __lowercase : Optional[Any] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        __lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        __lowercase : List[Any] = random_attention_mask([batch_size, 4] )
        __lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any:
        """Instantiate the CLIP vision model and BERT text model from configs (originally ``get_vision_text_model``)."""
        __lowercase : Dict = FlaxCLIPVisionModel(__a )
        __lowercase : Optional[Any] = FlaxBertModel(__a )
        return vision_model, text_model
    def lowerCAmelCase ( self : List[Any] ) -> List[str]:
        """Assemble configs and inputs from the CLIP and BERT model testers (originally ``prepare_config_and_inputs``)."""
        __lowercase : List[Any] = FlaxCLIPVisionModelTester(self )
        __lowercase : Optional[Any] = FlaxBertModelTester(self )
        __lowercase : Any = clip_model_tester.prepare_config_and_inputs()
        __lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
        __lowercase , __lowercase : Dict = vision_config_and_inputs
        __lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
    '''Slow integration test for the pretrained clip-italian dual encoder.

    NOTE(review): local assignment targets were mangled to ``__lowercase`` by
    automated renaming, so ``model``, ``processor``, ``image``, ``inputs``,
    ``outputs`` and ``expected_logits`` are referenced but never assigned —
    restore the original identifiers before executing.
    '''
    @slow
    def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        """Run the pretrained model on a COCO image with two Italian captions and verify the logits."""
        __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
        __lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        __lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        __lowercase : Tuple = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , padding=__a , return_tensors="""np""" )
        __lowercase : Optional[int] = model(**__a )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        __lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
| 306
| 1
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def snake_case_(s: str, old: str, new: str, occurrence: int) -> str:
    """Replace the last ``occurrence`` occurrences of ``old`` in ``s`` with ``new``.

    Fixed: automated renaming had collapsed all four parameters to the same
    name (a SyntaxError) while the body still referenced the original names
    ``s`` and ``new``; the original parameter names are restored from usage.

    Args:
        s: string to operate on.
        old: substring to replace, matched from the right.
        new: replacement substring.
        occurrence: maximum number of right-most matches to replace.

    Returns:
        ``s`` with up to ``occurrence`` right-most copies of ``old`` replaced.
    """
    # rsplit cuts at the right-most matches; re-joining with `new` replaces them.
    parts = s.rsplit(old, occurrence)
    return new.join(parts)
def snake_case_(state_dict):
    """Sum all parameter values in ``state_dict``, skipping ``encoder.embeddings`` tensors.

    Used as a cheap checksum to compare two state dicts after conversion.

    Fixed: the parameter had been mangled to ``lowerCAmelCase_`` while the body
    referenced ``state_dict``, raising ``NameError`` on every call; the
    parameter name is restored to match the body.

    Args:
        state_dict: mapping of parameter names to tensors.

    Returns:
        A 0-dim tensor (or 0) holding the total of all included parameter sums.
    """
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def snake_case_(state_dict):
    """Rename DALL-E encoder state-dict keys to the FLAVA image-codebook layout.

    Transformations, applied in order to each key:
      * ``group_N.``      -> ``group_N.group.``
      * ``res_path.``     -> ``res_path.path.``
      * trailing ``.w``   -> ``.weight``
      * trailing ``.b``   -> ``.bias``
    Values are cast to float32.

    Fixed: the parameter had been mangled to ``lowerCAmelCase_`` while the body
    used ``state_dict``; the result-dict assignment target had been destroyed
    (values were computed and discarded); and the body called ``rreplace``,
    which is not defined under that name in this file — a local helper with the
    same semantics is used instead so the function is self-contained.

    Args:
        state_dict: mapping of DALL-E parameter names to tensors.

    Returns:
        New dict with renamed keys and float32 values.
    """
    def _rreplace(s, old, new, occurrence):
        # Replace the last `occurrence` matches of `old` with `new`.
        return new.join(s.rsplit(old, occurrence))

    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}." , f"{group_key}.group." )
        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )
        if key.endswith(""".w""" ):
            key = _rreplace(key , """.w""" , """.weight""" , 1 )
        if key.endswith(""".b""" ):
            key = _rreplace(key , """.b""" , """.bias""" , 1 )
        upgrade[key] = value.float()
    return upgrade
# Convert a DALL-E dVAE encoder checkpoint into a FLAVA image codebook.
# NOTE(review): automated renaming collapsed the parameters into duplicate
# `lowerCAmelCase_` names (a SyntaxError) and mangled the assignment targets to
# `__lowercase`, so names such as `ckpt`, `config`, `hf_model`, `state_dict`,
# `hf_state_dict` and the two `count_parameters` results are referenced but
# never assigned. Restore from the upstream script before running.
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=True ):
    from dall_e import Encoder
    __lowercase : Any = Encoder()
    # Load the checkpoint from disk if present, otherwise fetch it by URL.
    if os.path.exists(lowerCAmelCase_ ):
        __lowercase : List[Any] = torch.load(lowerCAmelCase_ )
    else:
        __lowercase : List[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ )
    # If the checkpoint is a full module rather than a state dict, unwrap it.
    if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
        __lowercase : int = ckpt.state_dict()
    encoder.load_state_dict(lowerCAmelCase_ )
    # Build the FLAVA codebook config, from file or defaults.
    if config_path is not None:
        __lowercase : Optional[int] = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase_ )
    else:
        __lowercase : List[str] = FlavaImageCodebookConfig()
    __lowercase : Optional[Any] = FlavaImageCodebook(lowerCAmelCase_ ).eval()
    __lowercase : List[Any] = encoder.state_dict()
    # Rename keys to the FLAVA layout and load them into the HF model.
    __lowercase : Union[str, Any] = upgrade_state_dict(lowerCAmelCase_ )
    hf_model.load_state_dict(lowerCAmelCase_ )
    __lowercase : Dict = hf_model.state_dict()
    # Cheap checksum comparison between source and converted state dicts.
    __lowercase : Tuple = count_parameters(lowerCAmelCase_ )
    __lowercase : Tuple = count_parameters(lowerCAmelCase_ )
    assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 )
    if save_checkpoint:
        hf_model.save_pretrained(lowerCAmelCase_ )
    else:
        return hf_state_dict
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint/config/output paths and convert.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 306
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 306
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class lowerCAmelCase ( __a ):
    """Agent tool that reads English text out loud with SpeechT5.

    NOTE(review): the class attributes are all bound to the same name ``_A``
    and the four methods below are all bound to ``lowerCAmelCase`` — each
    binding shadows the previous one, so only the last of each survives on the
    class.  The duplicated ``__a`` parameter names in the second method are a
    SyntaxError.  Restore the original attribute/method names before running.
    """

    _A : Tuple = '''microsoft/speecht5_tts'''
    _A : List[str] = (
        '''This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '''
        '''text to read (in English) and returns a waveform object containing the sound.'''
    )
    _A : Optional[int] = '''text_reader'''
    _A : List[Any] = SpeechTaProcessor
    _A : List[Any] = SpeechTaForTextToSpeech
    _A : Union[str, Any] = SpeechTaHifiGan
    _A : Tuple = ['''text''']
    _A : Union[str, Any] = ['''audio''']

    def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
        """Default the vocoder checkpoint, then defer to the base-class setup."""
        if self.post_processor is None:
            # NOTE(review): assigned to a throwaway local instead of
            # ``self.post_processor`` — the default never takes effect.
            __lowercase : List[str] = """microsoft/speecht5_hifigan"""
        super().setup()

    def lowerCAmelCase ( self : Any , __a : Optional[int] , __a : int=None ) -> Any:
        """Tokenize the text and resolve default speaker embeddings.

        NOTE(review): ``speaker_embeddings``, ``embeddings_dataset`` and
        ``inputs`` are read but never bound under those names here.
        """
        __lowercase : Optional[Any] = self.pre_processor(text=__a , return_tensors="""pt""" , truncation=__a )
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
            # default x-vector comes from the CMU ARCTIC validation split
            __lowercase : Union[str, Any] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
            __lowercase : Any = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 )
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def lowerCAmelCase ( self : Tuple , __a : Any ) -> Any:
        """Run speech generation without tracking gradients."""
        with torch.no_grad():
            return self.model.generate_speech(**__a )

    def lowerCAmelCase ( self : int , __a : Union[str, Any] ) -> int:
        """Vocode the spectrogram and detach the waveform to CPU."""
        with torch.no_grad():
            return self.post_processor(__a ).cpu().detach()
| 306
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Token boundary pattern shared by the tokenizer helpers below.
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 2_56
def snake_case_ ( tokens ):
    """Compute a MinHash sketch over the unique *tokens*.

    Returns ``None`` for documents shorter than ``MIN_NUM_TOKENS`` so that
    near-empty files are never considered for deduplication.
    """
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    # fixed permutation count so sketches are comparable across documents
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash
def snake_case_ ( lowerCAmelCase_ : str ):
    """Return the set of non-empty tokens obtained by splitting the string on
    non-alphanumeric characters (``NON_ALPHA``)."""
    tokens = set()
    for piece in NON_ALPHA.split(lowerCAmelCase_):
        if piece.strip():
            tokens.add(piece)
    return tokens
class lowerCAmelCase :
    """MinHash-LSH index that groups near-duplicate code files into clusters.

    NOTE(review): every assignment below was rewritten to the throwaway local
    ``__lowercase``, so the ``self._index`` / ``self._duplicate_clusters`` /
    ``self._duplication_jaccard_threshold`` attributes the methods read are
    never set, and ``__init__`` reads an undefined name
    ``duplication_jaccard_threshold``.  The duplicated ``__a`` parameters of
    the second method are a SyntaxError.  Restore original names to run.
    """

    def __init__( self : List[str] , *,
        __a : float = 0.85 , ) -> Union[str, Any]:
        """Create the LSH index and the (defaultdict-backed) cluster table."""
        __lowercase : Optional[Any] = duplication_jaccard_threshold
        __lowercase : Optional[Any] = NUM_PERM
        __lowercase : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        __lowercase : List[str] = defaultdict(__a )

    def lowerCAmelCase ( self : str , __a : Tuple , __a : MinHash ) -> None:
        """Insert one document; attach it to an existing cluster when the LSH
        query returns close duplicates, otherwise open a new cluster."""
        __lowercase : List[Any] = self._index.query(__a )
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}" )
            return
        self._index.insert(__a , __a )
        if len(__a ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(__a )
                    break
            else:
                # no queried duplicate is itself a cluster base: start one
                self._duplicate_clusters[close_duplicates[0]].add(__a )

    def lowerCAmelCase ( self : Union[str, Any] ) -> List[List[Dict]]:
        """Return clusters as lists of ``{base_index, repo_name, path}`` dicts."""
        __lowercase : Dict = []
        for base, duplicates in self._duplicate_clusters.items():
            __lowercase : List[str] = [base] + list(__a )
            # reformat the cluster to be a list of dict
            __lowercase : Optional[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(__a )
        return duplicate_clusters

    def lowerCAmelCase ( self : Any , __a : int ) -> None:
        """Serialize the duplicate clusters to a JSON file."""
        __lowercase : Tuple = self.get_duplicate_clusters()
        with open(__a , """w""" ) as f:
            json.dump(__a , __a )
def snake_case_ ( element ):
    """Map one ``(index, row)`` pair to ``((index, repo_name, path), MinHash)``.

    Implicitly returns ``None`` when the document is too short to hash, so the
    caller can filter those out.
    """
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def snake_case_ ( lowerCAmelCase_ : Type[Dataset] ):
    """Yield ``((index, repo_name, path), MinHash)`` pairs for every dataset row.

    Hashing is fanned out over a multiprocessing pool; rows whose sketch is
    ``None`` (documents below the minimum token count) are dropped.

    NOTE(review): ``_compute_min_hash`` is not bound under that name in this
    file (the mapper above is defined as ``snake_case_``) — confirm it resolves
    before running.
    """
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(lowerCAmelCase_ , max_queue_size=10000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def snake_case_ ( dataset_iterator , jaccard_threshold ):
    """Build MinHash-LSH duplicate clusters for *dataset_iterator*.

    Returns a list of clusters, each a list of ``{base_index, repo_name, path}``
    dicts (see ``DuplicationIndex.get_duplicate_clusters``).
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)) , max_queue_size=100 )):
        di.add(filename , min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def snake_case_ ( codea , codeb ):
    """Return the Jaccard similarity between the token sets of two code strings."""
    tokensa = get_tokens(codea)
    tokensb = get_tokens(codeb)
    # |intersection| / |union|
    return len(tokensa & tokensb) / len(tokensa | tokensb)
# Worker-process handle on the dataset, populated by the `global _shared_dataset`
# assignment in `find_extremes` before the pool is created, and read by
# `_find_cluster_extremes_shared` in the children.
_shared_dataset = None
def snake_case_ ( cluster , jaccard_threshold ):
    """Reduce one duplicate *cluster* to its "extremes".

    An element is kept only when it is not within *jaccard_threshold*
    similarity of an element already kept; otherwise the kept element's
    ``copies`` counter is incremented.  File contents are read from the
    module-global ``_shared_dataset``.
    """
    extremes = []
    for elementa in cluster:
        codea = _shared_dataset[elementa["base_index"]]["content"]
        for elementb in extremes:
            codeb = _shared_dataset[elementb["base_index"]]["content"]
            if jaccard_similarity(codea , codeb) >= jaccard_threshold:
                elementb["copies"] += 1
                break
        else:
            # not close to any kept extreme: start a new one
            elementa["copies"] = 1
            extremes.append(elementa)
    return extremes
def snake_case_ ( cluster_list , dataset , jaccard_threshold ):
    """Compute the extremes of every cluster in *cluster_list* in parallel.

    Publishes *dataset* through the module-global ``_shared_dataset`` so that
    pool workers can read file contents without pickling the dataset per task.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list) , ):
            extremes_list.append(extremes)
    return extremes_list
def snake_case_ ( dataset , jaccard_threshold : float = 0.85 ):
    """Deduplicate *dataset*; return ``(filtered_dataset, duplicate_clusters)``.

    Pipeline: build MinHash duplicate clusters, keep one "extreme" per group of
    mutually-similar files, drop every other duplicate from the dataset, and
    annotate each cluster element with ``is_extreme`` / ``copies``.
    """
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    # drop every duplicate that is not the kept extreme of its group
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x , idx: idx not in remove_indices , with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(F"Original dataset size: {len(dataset)}" )
    print(F"Number of duplicate clusters: {len(duplicate_clusters)}" )
    print(F"Files in duplicate cluster: {len(duplicate_indices)}" )
    print(F"Unique files in duplicate cluster: {len(extreme_dict)}" )
    print(F"Filtered dataset size: {len(ds_filter)}" )
    return ds_filter, duplicate_clusters
| 306
| 1
|
import json
import os
import torch
from diffusers import UNetaDModel
# Pre-create the output directories so the converters below can save into them.
os.makedirs('''hub/hopper-medium-v2/unet/hor32''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/unet/hor128''', exist_ok=True)
os.makedirs('''hub/hopper-medium-v2/value_function''', exist_ok=True)
def snake_case_ ( hor ):
    """Convert a temporal UNet checkpoint for planning horizon *hor* (32 or 128).

    Loads the original torch module from a hard-coded local path, maps its
    parameters onto a freshly configured ``UNetaDModel`` by zipping the two
    state-dict key orders, and writes the weights plus the config JSON under
    ``hub/hopper-medium-v2/unet/hor{hor}``.
    """
    # block layout depends on the planning horizon of the checkpoint
    if hor == 128:
        down_block_types = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        block_out_channels = (32, 128, 256)
        up_block_types = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
    elif hor == 32:
        down_block_types = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
    else:
        raise ValueError(F"Unsupported horizon: {hor}" )
    model = torch.load(F"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch" )
    state_dict = model.state_dict()
    config = {
        """down_block_types""": down_block_types,
        """block_out_channels""": block_out_channels,
        """up_block_types""": up_block_types,
        """layers_per_block""": 1,
        """use_timestep_embedding""": True,
        """out_block_type""": """OutConv1DBlock""",
        """norm_num_groups""": 8,
        """downsample_each_block""": False,
        """in_channels""": 14,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """sample_size""": 65536,
        """mid_block_type""": """MidResTemporalBlock1D""",
        """act_fn""": """mish""",
    }
    hf_value_function = UNetaDModel(**config)
    print(F"length of state dict: {len(state_dict.keys() )}" )
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    # checkpoints line up one-to-one, so map old key names to new ones by position
    mapping = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict() , F"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin" )
    with open(F"hub/hopper-medium-v2/unet/hor{hor}/config.json" , """w""" ) as f:
        json.dump(config , f)
def snake_case_ ( ):
    """Convert the hopper-medium-v2 value-function checkpoint to diffusers format.

    The source checkpoint is already a plain state dict; its keys are remapped
    positionally onto a ``UNetaDModel`` configured as a value function, then the
    weights and config JSON are written under ``hub/hopper-medium-v2/value_function``.
    """
    config = {
        """in_channels""": 14,
        """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
        """up_block_types""": (),
        """out_block_type""": """ValueFunction""",
        """mid_block_type""": """ValueFunctionMidBlock1D""",
        """block_out_channels""": (32, 64, 128, 256),
        """layers_per_block""": 1,
        """downsample_each_block""": True,
        """sample_size""": 65536,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """use_timestep_embedding""": True,
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """norm_num_groups""": 8,
        """act_fn""": """mish""",
    }
    model = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
    # this checkpoint is stored as a state dict, not a module
    state_dict = model
    hf_value_function = UNetaDModel(**config)
    print(F"length of state dict: {len(state_dict.keys() )}" )
    print(F"length of value function dict: {len(hf_value_function.state_dict().keys() )}" )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
    with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
        json.dump(config , f)
if __name__ == "__main__":
    # NOTE(review): neither `unet` nor `value_function` is bound under these
    # names in this file — both converters above are defined as `snake_case_`,
    # the second shadowing the first.  Confirm the intended entry points.
    unet(32)
    # unet(128)
    value_function()
| 306
|
from ...processing_utils import ProcessorMixin
class lowerCAmelCase ( __a ):
    """Processor that bundles the TVLT image processor and feature extractor.

    NOTE(review): the three class attributes are all bound to the same name
    ``_A`` (each clobbers the previous), the ``__init__``/``__call__``
    signatures reuse the parameter name ``__a`` (a SyntaxError), and locals
    are assigned to the throwaway name ``__lowercase`` while later lines read
    the intended names (``images``, ``output_dict``, ...).  Restore the
    original names before running.
    """

    _A : List[str] = ['''image_processor''', '''feature_extractor''']
    _A : List[Any] = '''TvltImageProcessor'''
    _A : Optional[int] = '''TvltFeatureExtractor'''

    def __init__( self : str , __a : List[Any] , __a : Tuple ) -> Optional[Any]:
        """Store both sub-processors (also registered via the mixin)."""
        super().__init__(image_processor=__a , feature_extractor=__a )
        __lowercase : Union[str, Any] = image_processor
        __lowercase : Tuple = feature_extractor

    def __call__( self : Tuple , __a : Optional[int]=None , __a : Dict=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : Optional[Any]=False , __a : List[Any]=False , *__a : List[str] , **__a : List[Any] , ) -> Dict:
        """Route images to the image processor and audio to the feature
        extractor, merging their outputs into a single dict."""
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
        __lowercase : Tuple = None
        if images is not None:
            __lowercase : Any = self.image_processor(__a , mask_pixel=__a , *__a , **__a )
        if images_mixed is not None:
            __lowercase : Union[str, Any] = self.image_processor(__a , is_mixed=__a , *__a , **__a )
        if audio is not None:
            __lowercase : Optional[Any] = self.feature_extractor(
                __a , *__a , sampling_rate=__a , mask_audio=__a , **__a )
        __lowercase : Tuple = {}
        if audio is not None:
            output_dict.update(__a )
        if images is not None:
            output_dict.update(__a )
        if images_mixed_dict is not None:
            output_dict.update(__a )
        return output_dict

    @property
    def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        """Combined, de-duplicated model input names of both sub-processors."""
        __lowercase : int = self.image_processor.model_input_names
        __lowercase : Union[str, Any] = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 306
| 1
|
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCAmelCase ( __a ):
    """Repository-hygiene checks over every dataset script under ``./datasets``.

    NOTE(review): the four methods below all share the name ``lowerCAmelCase``
    (each shadows the previous), and locals are assigned to the throwaway name
    ``__lowercase`` while later lines read the intended names (``regexp``,
    ``match``, ``matches``, ``dataset_paths``, ``dataset_files``).  Restore
    the original helper/method names before running.
    """

    def lowerCAmelCase ( self : Optional[Any] , __a : str ) -> Optional[Any]:
        """Return a regex match when a file calls ``open`` without an explicit
        encoding/binary mode."""
        with open(__a , encoding="""utf-8""" ) as input_file:
            __lowercase : Union[str, Any] = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
            __lowercase : Tuple = input_file.read()
            __lowercase : Union[str, Any] = regexp.search(__a )
        return match

    def lowerCAmelCase ( self : int , __a : str ) -> str:
        """Return the first real (non-comment, non-docstring) ``print(`` match."""
        with open(__a , encoding="""utf-8""" ) as input_file:
            __lowercase : List[str] = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
            __lowercase : int = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            __lowercase : Dict = regexp.finditer(__a )
            __lowercase : List[str] = [match for match in matches if match is not None and match.group(1 ) is not None]
        return matches[0] if matches else None

    def lowerCAmelCase ( self : List[Any] ) -> Tuple:
        """Fail when any dataset script opens a file without utf-8 encoding."""
        __lowercase : List[Any] = Path("""./datasets""" )
        __lowercase : List[str] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(__a ) ):
                raise AssertionError(F"open(...) must use utf-8 encoding in {dataset}" )

    def lowerCAmelCase ( self : Dict ) -> Optional[int]:
        """Fail when any dataset script contains a bare ``print`` call."""
        __lowercase : List[str] = Path("""./datasets""" )
        __lowercase : Optional[Any] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
        for dataset in dataset_files:
            if self._no_print_statements(str(__a ) ):
                raise AssertionError(F"print statement found in {dataset}. Use datasets.logger/logging instead." )
| 306
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCAmelCase :
    """Test helper that fabricates configs and random inputs for ESM models.

    NOTE(review): every assignment in this class was rewritten to the
    throwaway local ``__lowercase``, so the ``self.*`` attributes read by the
    methods (``batch_size``, ``seq_length``, ...) are never actually set in
    ``__init__``, and the signatures reuse the parameter name ``__a`` — a
    SyntaxError.  The tuple unpack in the last method is likewise garbled
    (an annotated tuple target is invalid).  Restore original names to run.
    """

    def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]:
        """Record the model/test hyper-parameters on the tester instance."""
        __lowercase : Tuple = parent
        __lowercase : int = batch_size
        __lowercase : Any = seq_length
        __lowercase : str = is_training
        __lowercase : str = use_input_mask
        __lowercase : Optional[int] = use_token_type_ids
        __lowercase : List[Any] = use_labels
        __lowercase : Optional[Any] = vocab_size
        __lowercase : int = hidden_size
        __lowercase : List[Any] = num_hidden_layers
        __lowercase : Dict = num_attention_heads
        __lowercase : Any = intermediate_size
        __lowercase : Dict = hidden_act
        __lowercase : Union[str, Any] = hidden_dropout_prob
        __lowercase : List[Any] = attention_probs_dropout_prob
        __lowercase : List[str] = max_position_embeddings
        __lowercase : Union[str, Any] = type_vocab_size
        __lowercase : Dict = type_sequence_label_size
        __lowercase : Union[str, Any] = initializer_range
        __lowercase : List[Any] = num_labels
        __lowercase : str = num_choices
        __lowercase : Tuple = scope

    def lowerCAmelCase ( self : Tuple ) -> List[Any]:
        """Build random ids/masks/labels plus a config for the model tests."""
        __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __lowercase : int = None
        if self.use_input_mask:
            __lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
        __lowercase : str = None
        __lowercase : Optional[Any] = None
        __lowercase : Tuple = None
        if self.use_labels:
            __lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
        __lowercase : int = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase ( self : Dict ) -> Optional[int]:
        """Build an EsmConfig from the stored hyper-parameters."""
        return EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]:
        """Run the base EsmModel and check output shapes."""
        __lowercase : Optional[int] = EsmModel(config=__a )
        model.to(__a )
        model.eval()
        __lowercase : str = model(__a , attention_mask=__a )
        __lowercase : List[Any] = model(__a )
        __lowercase : Optional[int] = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]:
        """Run EsmForMaskedLM and check the logits shape."""
        __lowercase : List[str] = EsmForMaskedLM(config=__a )
        model.to(__a )
        model.eval()
        __lowercase : int = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]:
        """Run EsmForTokenClassification and check the logits shape."""
        __lowercase : Tuple = self.num_labels
        __lowercase : Any = EsmForTokenClassification(config=__a )
        model.to(__a )
        model.eval()
        __lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """Repackage prepare_config_and_inputs() into the common-test format."""
        __lowercase : Any = self.prepare_config_and_inputs()
        (
            (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) , (
                __lowercase
            ) ,
        ) : List[str] = config_and_inputs
        __lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
    """Model/pipeline test suite for the ESM family.

    NOTE(review): the class attributes are all bound to ``_A`` and the test
    methods to ``lowerCAmelCase`` — later bindings shadow earlier ones, so
    under unittest only the surviving method would be collected.  Locals are
    assigned to ``__lowercase`` while later lines read the intended names
    (``model_tester``, ``config_and_inputs``, ...).  Restore the original
    ``test_*`` names before trusting this suite.
    """

    _A : Optional[Any] = False
    _A : Any = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    _A : Optional[Any] = ()
    _A : List[Any] = (
        {
            '''feature-extraction''': EsmModel,
            '''fill-mask''': EsmForMaskedLM,
            '''text-classification''': EsmForSequenceClassification,
            '''token-classification''': EsmForTokenClassification,
            '''zero-shot''': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _A : Optional[Any] = True

    def lowerCAmelCase ( self : Tuple ) -> str:
        """Create the model tester and the config tester."""
        __lowercase : Optional[int] = EsmModelTester(self )
        __lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )

    def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase ( self : int ) -> Optional[Any]:
        """Exercise the base model forward pass."""
        __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Exercise the model under each position-embedding variant."""
        __lowercase : Any = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowercase : Union[str, Any] = type
            self.model_tester.create_and_check_model(*__a )

    def lowerCAmelCase ( self : int ) -> Any:
        """Exercise the masked-LM head."""
        __lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a )

    def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
        """Exercise the token-classification head."""
        __lowercase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a )

    @slow
    def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
        """Smoke-test from_pretrained on the first archive checkpoint."""
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase : List[str] = EsmModel.from_pretrained(__a )
            self.assertIsNotNone(__a )

    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """Check position-id creation from input ids respects the padding index."""
        __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
        __lowercase : List[str] = EsmEmbeddings(config=__a )
        __lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        __lowercase : int = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        __lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )

    def lowerCAmelCase ( self : Tuple ) -> Any:
        """Check position-id creation directly from input embeddings."""
        __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
        __lowercase : Optional[Any] = EsmEmbeddings(config=__a )
        __lowercase : Optional[int] = torch.empty(2 , 4 , 30 )
        __lowercase : Tuple = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        __lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
        __lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )

    @unittest.skip("""Esm does not support embedding resizing""" )
    def lowerCAmelCase ( self : Tuple ) -> int:
        """Skipped: ESM does not support embedding resizing."""
        pass

    @unittest.skip("""Esm does not support embedding resizing""" )
    def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
        """Skipped: ESM does not support embedding resizing."""
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
        """Skipped pending a smaller common-test model."""
        pass
@require_torch
class lowerCAmelCase ( __a ):
    """Slow integration checks pinning ESM-2 8M logits / hidden states.

    NOTE(review): both tests are bound to the same name ``lowerCAmelCase``
    (the second shadows the first), and model/output locals are assigned to
    ``__lowercase`` but read as ``model`` / ``output`` / ``vocab_size`` —
    restore the original names before running.
    """

    @slow
    def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        """Masked-LM head: check output shape and a 3x3 logits slice."""
        with torch.no_grad():
            __lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            __lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            __lowercase : List[str] = model(__a )[0]
            __lowercase : Union[str, Any] = 33
            __lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , __a )
            __lowercase : List[Any] = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )

    @slow
    def lowerCAmelCase ( self : str ) -> Union[str, Any]:
        """Base model: check a 3x3 slice of the last hidden state."""
        with torch.no_grad():
            __lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            __lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            __lowercase : Any = model(__a )[0]
            # compare the actual values for a slice.
            __lowercase : int = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 306
| 1
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
lowerCamelCase : List[Any] = logging.get_logger(__name__)
# NOTE(review): this second assignment rebinds `lowerCamelCase`, clobbering the
# logger created above — presumably these were two distinct names (a logger and
# a pretrained-config archive map); confirm before relying on either.
lowerCamelCase : Any = {
    '''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class lowerCAmelCase ( __a ):
    """Configuration for the AltCLIP text encoder.

    Stores the transformer hyper-parameters plus ``project_dim``, the size of
    the projection applied on top of the encoder; everything else is handled by
    the base config via ``**kwargs``.
    """

    _A : List[Any] = '''altclip_text_model'''

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1E-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        """Store the hyper-parameters; unknown kwargs go to the base config."""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim
class lowerCAmelCase ( __a ):
    '''Configuration for the AltCLIP vision encoder (ViT-style image transformer).

    NOTE(review): same mangling as the text config — duplicate `__a` parameters and
    attribute assignments referencing unbound original names.
    '''
    # model_type identifier used by the Auto* config machinery
    _A : Any = '''altclip_vision_model'''
    def __init__( self : Tuple , __a : List[Any]=768 , __a : Optional[int]=3072 , __a : Optional[Any]=512 , __a : Union[str, Any]=12 , __a : Any=12 , __a : Optional[int]=3 , __a : Any=224 , __a : Dict=32 , __a : int="quick_gelu" , __a : Dict=1E-5 , __a : Dict=0.0 , __a : Optional[Any]=0.02 , __a : Dict=1.0 , **__a : Optional[int] , ) -> Union[str, Any]:
        """Store vision-transformer hyper-parameters on the config instance."""
        super().__init__(**__a )
        __lowercase : List[Any] = hidden_size
        __lowercase : Optional[int] = intermediate_size
        __lowercase : Tuple = projection_dim
        __lowercase : List[Any] = num_hidden_layers
        __lowercase : Optional[Any] = num_attention_heads
        __lowercase : str = num_channels
        __lowercase : Optional[int] = patch_size
        __lowercase : Optional[Any] = image_size
        __lowercase : List[str] = initializer_range
        __lowercase : Optional[int] = initializer_factor
        __lowercase : str = attention_dropout
        __lowercase : str = layer_norm_eps
        __lowercase : List[Any] = hidden_act
    @classmethod
    def lowerCAmelCase ( cls : int , __a : Union[str, os.PathLike] , **__a : List[Any] ) -> "PretrainedConfig":
        """Load this vision config from a pretrained checkpoint; if the checkpoint is a full
        AltCLIP config, extract its nested `vision_config` section first.

        NOTE(review): `config_dict` below is unbound — it is the mangled result of
        `cls.get_config_dict(...)` assigned to a throwaway name.
        """
        cls._set_token_in_kwargs(__a )
        __lowercase , __lowercase : Optional[Any] = cls.get_config_dict(__a , **__a )
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("""model_type""" ) == "altclip":
            __lowercase : Tuple = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
        return cls.from_dict(__a , **__a )
class lowerCAmelCase ( __a ):
    '''Composite AltCLIP configuration: nests a text config and a vision config plus the
    shared projection dimension and logit-scale initialization.

    NOTE(review): the `__init__` signature has duplicate `__a` parameters (SyntaxError)
    and the body references the original names (`kwargs`, `text_config_dict`,
    `text_config`, ...) that the mangled `__lowercase` assignments no longer bind.
    '''
    _A : str = '''altclip'''
    # this config is a composition of sub-configs
    _A : str = True
    def __init__( self : Optional[Any] , __a : Union[str, Any]=None , __a : str=None , __a : Optional[int]=768 , __a : Optional[int]=2.6592 , **__a : Any ) -> List[Any]:
        """Build the composite config, reconciling legacy `*_config_dict` kwargs with the
        preferred `text_config` / `vision_config` arguments (dict values win, with warnings)."""
        __lowercase : Dict = kwargs.pop("""text_config_dict""" , __a )
        __lowercase : Optional[int] = kwargs.pop("""vision_config_dict""" , __a )
        super().__init__(**__a )
        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                __lowercase : Optional[Any] = {}
            # This is the complete result when using `text_config_dict`.
            __lowercase : Tuple = AltCLIPTextConfig(**__a ).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        __lowercase : List[Any] = (
                            F"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            F"The value `text_config_dict[\"{key}\"]` will be used instead."
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __lowercase : Union[str, Any] = (
                            F"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            F"value `text_config[\"{key}\"]` will be overriden."
                        )
                    logger.warning(__a )
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict )
        if vision_config_dict is not None:
            if vision_config is None:
                __lowercase : Tuple = {}
            # This is the complete result when using `vision_config_dict`.
            __lowercase : Any = AltCLIPVisionConfig(**__a ).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                __lowercase : int = {
                    str(__a ): value for key, value in _vision_config_dict["""id2label"""].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        __lowercase : Optional[int] = (
                            F"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            F"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        __lowercase : Optional[int] = (
                            F"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            F"The value `vision_config[\"{key}\"]` will be overriden."
                        )
                    logger.warning(__a )
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict )
        if text_config is None:
            __lowercase : List[str] = {}
            logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
        if vision_config is None:
            __lowercase : int = {}
            logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
        __lowercase : int = AltCLIPTextConfig(**__a )
        __lowercase : int = AltCLIPVisionConfig(**__a )
        __lowercase : Optional[int] = projection_dim
        __lowercase : Dict = logit_scale_init_value
        __lowercase : Any = 1.0
    @classmethod
    def lowerCAmelCase ( cls : Optional[int] , __a : AltCLIPTextConfig , __a : AltCLIPVisionConfig , **__a : List[Any] ) -> int:
        """Alternate constructor: build an AltCLIPConfig from already-instantiated sub-configs.

        NOTE(review): `text_config` / `vision_config` in the body are the original
        parameter names; both parameters are currently named `__a`.
        """
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
    def lowerCAmelCase ( self : Tuple ) -> int:
        """Serialize this config (and nested sub-configs) to a plain dict, adding `model_type`."""
        __lowercase : Tuple = copy.deepcopy(self.__dict__ )
        __lowercase : Optional[Any] = self.text_config.to_dict()
        __lowercase : int = self.vision_config.to_dict()
        __lowercase : Tuple = self.__class__.model_type
        return output
| 306
|
def snake_case_ ( lowerCAmelCase_ : int ):
    """Return True if ``lowerCAmelCase_`` is a pentagonal number.

    A positive integer P is pentagonal iff (1 + sqrt(1 + 24*P)) / 6 is a whole number
    (inverse of P_n = n*(3n - 1)/2).

    Bug fix: the body referenced unbound names ``n`` and ``root`` — the parameter was
    never used and the square root was assigned to a throwaway name.
    """
    root = (1 + 24 * lowerCAmelCase_) ** 0.5
    # Whole-number check via the fractional part of (1 + root) / 6.
    return ((1 + root) / 6) % 1 == 0
def snake_case_ ( lowerCAmelCase_ : int = 5000 ):
    """Project Euler 44 brute force: among the first ``lowerCAmelCase_ - 1`` pentagonal
    numbers, find the first pair (P_i, P_j), j >= i, whose sum and difference are both
    pentagonal, and return the difference D = P_j - P_i; return -1 if no pair exists.

    Bug fixes: the original iterated ``enumerate`` / ``range`` over the integer argument
    instead of the pentagonal list, and referenced unbound names (``pentagonal_nums``,
    ``is_pentagonal``, ``b``) left behind by mangled assignments. A local helper makes
    this block self-contained.
    """
    def _is_pentagonal(num: int) -> bool:
        # num is pentagonal iff (1 + sqrt(1 + 24*num)) / 6 is a whole number.
        root = (1 + 24 * num) ** 0.5
        return ((1 + root) / 6) % 1 == 0

    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, lowerCAmelCase_)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if _is_pentagonal(a) and _is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module — it is presumably the
    # brute-force search above (here renamed `snake_case_`); confirm before running.
    print(f'''{solution() = }''')
| 306
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
lowerCamelCase : Optional[Any] = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
    '''RoBERTa model configuration: stores the transformer hyper-parameters.

    NOTE(review): every parameter is named `__a` (duplicate argument names are a
    SyntaxError) and the attribute assignments reference the original parameter names
    (`vocab_size`, `hidden_size`, ...) that are no longer bound. The original
    signature must be restored.
    '''
    # model_type identifier used by the Auto* config machinery
    _A : Union[str, Any] = '''roberta'''
    def __init__( self : Union[str, Any] , __a : int=50265 , __a : int=768 , __a : Dict=12 , __a : List[Any]=12 , __a : List[str]=3072 , __a : List[str]="gelu" , __a : Optional[Any]=0.1 , __a : List[Any]=0.1 , __a : Dict=512 , __a : Optional[int]=2 , __a : Dict=0.02 , __a : Dict=1E-12 , __a : str=1 , __a : Optional[int]=0 , __a : List[str]=2 , __a : Union[str, Any]="absolute" , __a : List[Any]=True , __a : List[str]=None , **__a : Optional[Any] , ) -> Optional[int]:
        """Store transformer hyper-parameters on the config instance."""
        super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
        __lowercase : Optional[int] = vocab_size
        __lowercase : str = hidden_size
        __lowercase : List[str] = num_hidden_layers
        __lowercase : List[str] = num_attention_heads
        __lowercase : str = hidden_act
        __lowercase : Any = intermediate_size
        __lowercase : Tuple = hidden_dropout_prob
        __lowercase : int = attention_probs_dropout_prob
        __lowercase : Tuple = max_position_embeddings
        __lowercase : Tuple = type_vocab_size
        __lowercase : int = initializer_range
        __lowercase : Dict = layer_norm_eps
        __lowercase : Tuple = position_embedding_type
        __lowercase : Union[str, Any] = use_cache
        __lowercase : Union[str, Any] = classifier_dropout
class lowerCAmelCase ( __a ):
    '''ONNX export configuration for RoBERTa: declares the dynamic axes of the model inputs.'''

    @property
    def lowerCAmelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        """Return the input-name -> {axis index: axis name} mapping used for ONNX dynamic axes.

        Multiple-choice tasks carry an extra ``choice`` axis between batch and sequence.

        Bug fix: the axis mapping was assigned to a throwaway name, leaving
        ``dynamic_axis`` unbound (NameError) when the property was evaluated.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 306
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase ( __a ):
    '''Scheduler test suite for DPMSolverSDEScheduler: config sweeps plus full denoising
    loops whose output sums/means are compared to per-device reference values.

    NOTE(review): throughout this class, locals are assigned to the mangled name
    `__lowercase` and then referenced by their original names (`config`, `scheduler`,
    `model`, `sample`, `output`, `result_sum`, `result_mean`, ...), and loop variables
    are passed as the unbound `__a`. The original local names must be restored before
    these tests can run.
    '''
    # scheduler classes under test and the number of inference steps used by the loops
    _A : Optional[Any] = (DPMSolverSDEScheduler,)
    _A : Dict = 10
    def lowerCAmelCase ( self : Optional[int] , **__a : Dict ) -> Optional[int]:
        """Build the default scheduler config, updated with any keyword overrides."""
        __lowercase : Any = {
            """num_train_timesteps""": 1100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**__a )
        return config
    def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """Sweep num_train_timesteps through the common config-roundtrip check."""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=__a )
    def lowerCAmelCase ( self : Any ) -> Optional[int]:
        """Sweep paired (beta_start, beta_end) values through the config-roundtrip check."""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=__a , beta_end=__a )
    def lowerCAmelCase ( self : str ) -> Optional[Any]:
        """Sweep the supported beta schedules through the config-roundtrip check."""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__a )
    def lowerCAmelCase ( self : Dict ) -> Tuple:
        """Sweep the supported prediction types through the config-roundtrip check."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__a )
    def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        """Run a full denoising loop with the default (epsilon) config and compare the
        output sum/mean against per-device reference values."""
        __lowercase : Optional[int] = self.scheduler_classes[0]
        __lowercase : List[str] = self.get_scheduler_config()
        __lowercase : Any = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps )
        __lowercase : Optional[Any] = self.dummy_model()
        __lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowercase : Optional[Any] = sample.to(__a )
        for i, t in enumerate(scheduler.timesteps ):
            __lowercase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
            __lowercase : Optional[Any] = model(__a , __a )
            __lowercase : Optional[Any] = scheduler.step(__a , __a , __a )
            __lowercase : str = output.prev_sample
        __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : Union[str, Any] = torch.mean(torch.abs(__a ) )
        # Reference values differ per backend because the SDE noise path is device-dependent.
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """Same denoising loop but with prediction_type="v_prediction" and its own references."""
        __lowercase : Tuple = self.scheduler_classes[0]
        __lowercase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""" )
        __lowercase : int = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps )
        __lowercase : Optional[int] = self.dummy_model()
        __lowercase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowercase : Dict = sample.to(__a )
        for i, t in enumerate(scheduler.timesteps ):
            __lowercase : Dict = scheduler.scale_model_input(__a , __a )
            __lowercase : Optional[int] = model(__a , __a )
            __lowercase : Optional[int] = scheduler.step(__a , __a , __a )
            __lowercase : int = output.prev_sample
        __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : List[str] = torch.mean(torch.abs(__a ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
            assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
            assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
            assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
    def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
        """Same loop but with timesteps placed on the accelerator device explicitly."""
        __lowercase : Tuple = self.scheduler_classes[0]
        __lowercase : Dict = self.get_scheduler_config()
        __lowercase : Optional[int] = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps , device=__a )
        __lowercase : int = self.dummy_model()
        __lowercase : Optional[Any] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            __lowercase : int = scheduler.scale_model_input(__a , __a )
            __lowercase : List[str] = model(__a , __a )
            __lowercase : List[str] = scheduler.step(__a , __a , __a )
            __lowercase : int = output.prev_sample
        __lowercase : List[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
            assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
            assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """Same loop but with Karras sigma spacing enabled and its own references."""
        __lowercase : str = self.scheduler_classes[0]
        __lowercase : List[Any] = self.get_scheduler_config()
        __lowercase : Tuple = scheduler_class(**__a , use_karras_sigmas=__a )
        scheduler.set_timesteps(self.num_inference_steps , device=__a )
        __lowercase : List[str] = self.dummy_model()
        __lowercase : Optional[int] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
        __lowercase : str = sample.to(__a )
        for t in scheduler.timesteps:
            __lowercase : List[Any] = scheduler.scale_model_input(__a , __a )
            __lowercase : Optional[Any] = model(__a , __a )
            __lowercase : Any = scheduler.step(__a , __a , __a )
            __lowercase : Optional[Any] = output.prev_sample
        __lowercase : Any = torch.sum(torch.abs(__a ) )
        __lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
            assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
| 306
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class lowerCAmelCase ( __a ):
    '''Pipeline tool that answers a natural-language question about an image using the
    ViLT VQA checkpoint declared in `_A` (dandelin/vilt-b32-finetuned-vqa).'''

    _A : Dict = '''dandelin/vilt-b32-finetuned-vqa'''
    _A : Optional[int] = (
        '''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
        '''image containing the information, as well as a `question` which should be the question in English. It '''
        '''returns a text that is the answer to the question.'''
    )
    _A : int = '''image_qa'''
    _A : Union[str, Any] = AutoProcessor
    _A : Optional[Any] = AutoModelForVisualQuestionAnswering
    _A : Union[str, Any] = ['''image''', '''text''']
    _A : Any = ['''text''']

    def __init__( self : List[Any] , *args : Tuple , **kwargs : Optional[int] ) -> Optional[int]:
        """Require the vision extra (PIL) before constructing the underlying pipeline tool.

        Bug fix: ``*__a, **__a`` used the same name for both catch-all parameters,
        which is a SyntaxError; restored distinct names.
        """
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )

    def lowerCAmelCase ( self : Dict , image : "Image" , question : str ) -> int:
        """Encode the (image, question) pair into model-ready tensors.

        Bug fix: both parameters were named ``__a`` (SyntaxError); restored distinct
        names matching the tool description above.
        """
        return self.pre_processor(image , question , return_tensors="""pt""" )

    def lowerCAmelCase ( self : Tuple , __a : List[str] ) -> Union[str, Any]:
        """Run the VQA model on the encoded inputs and return the answer logits."""
        with torch.no_grad():
            return self.model(**__a ).logits

    def lowerCAmelCase ( self : Optional[int] , __a : List[str] ) -> Dict:
        """Map the highest-scoring logit index to its answer label string.

        Bug fixes: the argmax result was assigned to a throwaway name leaving ``idx``
        unbound, and the config attribute is ``id2label`` (``idalabel`` does not exist
        on PretrainedConfig).
        """
        idx = __a.argmax(-1 ).item()
        return self.model.config.id2label[idx]
| 306
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
# --- Script setup: loggers, CLI arguments, tokenizer, and TensorRT engine build. ---
# NOTE(review): throughout this section, values are assigned to the mangled name
# `lowerCamelCase` and then referenced by their original names (`absl_logger`, `logger`,
# `parser`, `args`, `tokenizer`, `STRICT_TYPES`, `engine_name`, `TRT_LOGGER`,
# `EXPLICIT_BATCH`, `network_inputs`, `input_names`, `profile`, `engine`, ...).
# Likewise `args.fpaa` / `args.inta` and `trt.BuilderFlag.FPaa` / `INTa` look mangled
# from fp16/int8. The original names must be restored before this script can run.
lowerCamelCase : str = trt.Logger(trt.Logger.WARNING)
lowerCamelCase : Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '''--onnx_model_path''',
    default=None,
    type=str,
    required=True,
    help='''Path to ONNX model: ''',
)
parser.add_argument(
    '''--output_dir''',
    default=None,
    type=str,
    required=True,
    help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
    '''--tokenizer_name''',
    default='''''',
    type=str,
    required=True,
    help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
    '''--version_2_with_negative''',
    action='''store_true''',
    help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
    '''--null_score_diff_threshold''',
    type=float,
    default=0.0,
    help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
    '''--max_seq_length''',
    default=3_84,
    type=int,
    help=(
        '''The maximum total input sequence length after WordPiece tokenization. Sequences '''
        '''longer than this will be truncated, and sequences shorter than this will be padded.'''
    ),
)
parser.add_argument(
    '''--doc_stride''',
    default=1_28,
    type=int,
    help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
    '''--n_best_size''',
    default=20,
    type=int,
    help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
    '''--max_answer_length''',
    default=30,
    type=int,
    help=(
        '''The maximum length of an answer that can be generated. This is needed because the start '''
        '''and end predictions are not conditioned on one another.'''
    ),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
    '''--dataset_name''',
    type=str,
    default=None,
    required=True,
    help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--dataset_config_name''',
    type=str,
    default=None,
    help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
    '''--fp16''',
    action='''store_true''',
    help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
    '''--int8''',
    action='''store_true''',
    help='''Whether to use INT8''',
)
lowerCamelCase : Dict = parser.parse_args()
if args.tokenizer_name:
    lowerCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
        '''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
    )
logger.info('''Training/evaluation parameters %s''', args)
lowerCamelCase : List[str] = args.per_device_eval_batch_size
lowerCamelCase : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase : List[str] = True
lowerCamelCase : List[Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
    lowerCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine'''
if args.inta:
    lowerCamelCase : int = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
    os.makedirs('''temp_engine''')
lowerCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, '''rb''') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    lowerCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
    lowerCamelCase : Dict = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        lowerCamelCase : List[str] = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fpaa:
            config.set_flag(trt.BuilderFlag.FPaa)
        if args.inta:
            config.set_flag(trt.BuilderFlag.INTa)
        lowerCamelCase : Optional[int] = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        lowerCamelCase : Optional[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
    f.write(engine.serialize())
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ):
    """Run one TensorRT inference pass: copy inputs host->device, execute the engine
    asynchronously, copy the two output buffers device->host, and return
    (outputs, wall-clock inference time).

    NOTE(review): the parameter list repeats ``lowerCAmelCase_`` (duplicate argument
    names are a SyntaxError), and the body references unbound names (``inputs``,
    ``input_ids``, ``start_time``/``end_time``, ``h_outputa``, ``outputs``,
    ``infer_time``; ``np.intaa`` looks mangled from ``np.int32``/``int64``). The
    original signature and local names must be restored.
    """
    __lowercase : List[str] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
    __lowercase : Union[str, Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
    __lowercase : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCAmelCase_ )
    cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCAmelCase_ )
    cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCAmelCase_ )
    # start time
    __lowercase : Optional[Any] = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(lowerCAmelCase_ ) for d_inp in d_inputs] + [int(lowerCAmelCase_ ), int(lowerCAmelCase_ )] , stream_handle=stream.handle )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    __lowercase : int = time.time()
    __lowercase : Union[str, Any] = end_time - start_time
    __lowercase : Any = (h_outputa, h_outputa)
    # print(outputs)
    return outputs, infer_time
# --- Accelerator / logging / dataset setup. ---
# NOTE(review): same mangling as above — values are assigned to `lowerCamelCase` but
# referenced as `accelerator`, `raw_datasets`, `column_names`, `tokenizer`,
# `max_seq_length`, etc. The original names must be restored.
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    lowerCamelCase : List[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
lowerCamelCase : Optional[Any] = raw_datasets['''validation'''].column_names
lowerCamelCase : Union[str, Any] = '''question''' if '''question''' in column_names else column_names[0]
lowerCamelCase : str = '''context''' if '''context''' in column_names else column_names[1]
lowerCamelCase : Dict = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase : Dict = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )
lowerCamelCase : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def snake_case_ ( lowerCAmelCase_ : int ):
    """Tokenize SQuAD-style validation examples into overlapping features (stride windows),
    recording `example_id` and a context-only `offset_mapping` for post-processing.

    NOTE(review): the body references unbound names (`examples`, `tokenized_examples`,
    `sample_mapping`, `sequence_ids`, `context_index`, `sample_index`) — the
    `__lowercase` assignments look machine-mangled; the original parameter name
    (`examples`) and local names must be restored.
    """
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    __lowercase : str = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    __lowercase : List[str] = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCAmelCase_ , stride=args.doc_stride , return_overflowing_tokens=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , padding="""max_length""" , )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    __lowercase : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    __lowercase : Any = []
    for i in range(len(tokenized_examples["""input_ids"""] ) ):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        __lowercase : Dict = tokenized_examples.sequence_ids(lowerCAmelCase_ )
        __lowercase : List[Any] = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        __lowercase : List[str] = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        __lowercase : Dict = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
        ]
    return tokenized_examples
# --- Build the evaluation features and DataLoader. ---
# NOTE(review): `eval_examples`, `eval_dataset`, `data_collator`,
# `eval_dataset_for_model` are referenced but never bound (mangled assignments).
lowerCamelCase : Tuple = raw_datasets['''validation''']
# Validation Feature Creation
lowerCamelCase : Optional[int] = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)
lowerCamelCase : Union[str, Any] = default_data_collator
# Drop the bookkeeping columns the model forward does not accept.
lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowerCamelCase : List[str] = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def snake_case_ ( examples , features , predictions , stage="eval" ):
    """Convert raw start/end logits into answer strings and package them for the metric.

    Args:
        examples: the original (untokenized) validation examples.
        features: the tokenized features produced from ``examples``.
        predictions: tuple of (start_logits, end_logits) numpy arrays.
        stage: prefix used for the prediction files written to ``args.output_dir``.

    Returns:
        An ``EvalPrediction`` with formatted predictions and gold references.
    """
    # Bug fix: the original signature declared four parameters all named
    # `lowerCAmelCase_` (a SyntaxError); names restored from the body's usage.
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        # SQuAD v2 additionally expects a no-answer probability per prediction.
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
# Metric matches the dataset flavor (SQuAD v2 adds unanswerable questions).
lowerCamelCase : Dict = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
# Deserialize the serialized TensorRT engine and open an execution context; the
# chained `with` keeps runtime / engine / context lifetimes correctly nested.
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    # Every input binding is fixed to INPUT_SHAPE before running inference.
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified
def snake_case_ ( lowerCAmelCase_ : str ):
return trt.volume(engine.get_binding_shape(lowerCAmelCase_ ) ) * engine.get_binding_dtype(lowerCAmelCase_ ).itemsize
    # NOTE(review): assignment targets below were clobbered to `lowerCamelCase`
    # by a rename pass; the names actually read later are h_output*, d_output*,
    # stream, total_time, niter, start_time, all_preds, etc. — verify upstream.
    # Allocate device memory for inputs and outputs.
    lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    # Bindings 3 and 4 are presumably the start/end logit outputs — TODO confirm
    # against the exported ONNX graph.
    lowerCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    lowerCamelCase : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    lowerCamelCase : Dict = cuda.mem_alloc(h_outputa.nbytes)
    lowerCamelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    lowerCamelCase : Optional[int] = cuda.Stream()
    # Evaluation
    logger.info('''***** Running Evaluation *****''')
    logger.info(f''' Num examples = {len(eval_dataset)}''')
    logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    lowerCamelCase : int = 0.0  # accumulated pure-inference time (seconds)
    lowerCamelCase : List[str] = 0  # number of inference calls made
    lowerCamelCase : List[str] = timeit.default_timer()
    lowerCamelCase : List[Any] = None  # accumulated (start_logits, end_logits)
    for step, batch in enumerate(eval_dataloader):
        lowerCamelCase ,lowerCamelCase : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1
        lowerCamelCase ,lowerCamelCase : Union[str, Any] = outputs
        lowerCamelCase : Optional[Any] = torch.tensor(start_logits)
        lowerCamelCase : List[str] = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        lowerCamelCase : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        lowerCamelCase : Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
    if all_preds is not None:
        # Gathering across processes may duplicate the dataset tail; trim it.
        lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset))
    lowerCamelCase : Dict = timeit.default_timer() - start_time
    logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
    logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
    logger.info('''Total Number of Inference = %d''', niter)
# Convert the gathered logits to answer strings and score with the SQuAD metric.
lowerCamelCase : str = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 306
| 1
|
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Shown when the script is invoked with the wrong number of CLI arguments.
lowerCamelCase : Optional[int] = '''Usage of script: script_name <size_of_canvas:int>'''
# 100 dead (0) and 10 alive (1) markers, shuffled — NOTE(review): the shuffle
# below reads `choice`, but the rename pass clobbered the assignment target.
lowerCamelCase : int = [0] * 1_00 + [1] * 10
random.shuffle(choice)
def snake_case_ ( lowerCAmelCase_ : int ):
    """Return a square canvas of size x size cells, all dead (False).

    Bug fix: the original assigned the grid to a throwaway name but
    returned `canvas`, raising NameError on every call.
    """
    canvas = [[False for _ in range(lowerCAmelCase_ )] for _ in range(lowerCAmelCase_ )]
    return canvas
def snake_case_ ( lowerCAmelCase_ : list[list[bool]] ):
    """Randomly seed every cell of the canvas in place with a fair coin flip.

    Bug fix: the original bound the random bit to a throwaway local, so the
    canvas was never modified; the value is now written back into the row.
    """
    for i, row in enumerate(lowerCAmelCase_ ):
        for j, _ in enumerate(row ):
            # Writing through `row` mutates the caller's canvas.
            row[j] = bool(random.getrandbits(1 ) )
def snake_case_ ( lowerCAmelCase_ : list[list[bool]] ):
    """Compute one Game of Life generation and return it as a new list of lists.

    The input canvas is left unmodified. Bug fix: the original lost the
    assignment targets (`next_gen_canvas[r][c]`, `current_canvas`,
    `return_canvas`) to a rename pass, so it raised NameError.
    """
    current_canvas = np.array(lowerCAmelCase_ )
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(current_canvas ):
        for c, pt in enumerate(row ):
            # The 3x3 neighbourhood follows numpy slice semantics at the
            # borders (a -1 start can yield an empty slice) — behavior kept
            # identical to the original algorithm.
            next_gen_canvas[r][c] = __judge_point(
                pt , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def snake_case_ ( pt : bool , neighbours : list[list[bool]] ):
    """Apply Conway's rules to one cell given its 3x3 neighbourhood (which
    includes the cell itself) and return the cell's next state.

    Bug fix: the original declared both parameters as `lowerCAmelCase_` (a
    SyntaxError) and returned `state` without ever assigning it; names are
    restored from the body's own references (`pt`, `neighbours`).
    """
    alive = 0
    dead = 0
    # finding dead or alive neighbours count.
    for row in neighbours:
        for status in row:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True   # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True   # reproduction
    return state
if __name__ == "__main__":
    # NOTE(review): targets below were clobbered to `lowerCamelCase`, while the
    # calls read `canvas_size`, `c`, `fig`, `ax`, `cmap` — verify upstream.
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    # Canvas size comes from the single command-line argument.
    lowerCamelCase : Dict = int(sys.argv[1])
    # main working structure of this module.
    lowerCamelCase : str = create_canvas(canvas_size)
    seed(c)
    lowerCamelCase ,lowerCamelCase : List[str] = plt.subplots()
    fig.show()
    # Two-color map: 'w' (white) for dead cells, 'k' (black) for live cells.
    lowerCamelCase : Any = ListedColormap(['''w''', '''k'''])
    try:
        # Draw one generation per iteration until the user interrupts (Ctrl-C).
        while True:
            lowerCamelCase : List[Any] = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 306
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# Released checkpoint -> hosted config.json location.
lowerCamelCase : str = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class lowerCAmelCase ( __a ):
    """Configuration class for an NLLB-MoE model.

    Defaults mirror the facebook/nllb-moe-54b checkpoint. Bug fix: the original
    `__init__` declared every parameter as `__a` (a SyntaxError) and assigned
    class attributes three times under the name `_A`; parameter and attribute
    names are restored from the values the body reads.
    """

    model_type = '''nllb-moe'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    # Generic attribute names mapped onto this config's field names.
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        """Store all hyper-parameters and forward token ids to the base config."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts / routing hyper-parameters.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 306
| 1
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Tokens are split on any character that is not a letter, digit or underscore.
lowerCamelCase : str = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
# NOTE(review): targets were clobbered — later code reads these values as
# MIN_NUM_TOKENS (minimum tokens per file) and NUM_PERM (MinHash permutations).
lowerCamelCase : Union[str, Any] = 10
lowerCamelCase : List[str] = 2_56
def snake_case_ ( lowerCAmelCase_ : List[str] ):
    """Compute the MinHash of a token list; return None for files with fewer
    than MIN_NUM_TOKENS tokens.

    Bug fixes: `min_hash` was never assigned (NameError on return), and the
    token list itself was being passed as `num_perm` instead of NUM_PERM.
    """
    if len(lowerCAmelCase_ ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    # Hash each distinct token once; duplicates do not change a MinHash.
    for token in set(lowerCAmelCase_ ):
        min_hash.update(token.encode() )
    return min_hash
def snake_case_ ( lowerCAmelCase_ : str ):
    """Split the string on non-alphanumeric characters and return the set of
    non-empty (non-whitespace) tokens."""
    pieces = NON_ALPHA.split(lowerCAmelCase_ )
    return {piece for piece in pieces if piece.strip()}
class lowerCAmelCase :
    """Index of near-duplicate files built on MinHash LSH.

    Bug fixes: `__init__` previously bound plain locals instead of the
    `self._*` attributes every other method reads; `add` declared two
    parameters both named `__a` (a SyntaxError); the three methods all shared
    one name although `self.get_duplicate_clusters()` is called explicitly.
    """

    def __init__( self , * , duplication_jaccard_threshold: float = 0.85 ) -> None:
        """Create an empty LSH index with the given Jaccard threshold."""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        # base key -> set of keys considered near-duplicates of it.
        self._duplicate_clusters = defaultdict(set )

    def add( self , code_key: Tuple , min_hash: "MinHash" ) -> None:
        """Insert (index, repo_name, path) keyed file into the index, attaching
        it to an existing cluster when the LSH query finds close duplicates."""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}" )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                # No known cluster base matched; start one at the first hit.
                self._duplicate_clusters[close_duplicates[0]].add(code_key )

    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        """Return clusters as lists of {'base_index', 'repo_name', 'path'} dicts,
        each cluster led by its base element."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters

    def save( self , filepath ) -> None:
        """Dump the duplicate clusters as JSON to *filepath*."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , "w" ) as f:
            json.dump(duplicate_clusters , f )
def snake_case_ ( lowerCAmelCase_ : str ):
    """Compute ((index, repo_name, path), MinHash) for one (index, row) pair.

    Returns None implicitly when the file has too few tokens for a MinHash.
    Bug fix: the tuple unpack `index, data = element` had lost its targets,
    so `index`/`data` were unbound.
    """
    index, data = lowerCAmelCase_
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def snake_case_ ( lowerCAmelCase_ : Type[Dataset] ):
    """Yield (key, MinHash) pairs computed in parallel over the dataset iterator,
    skipping rows for which no MinHash was produced."""
    with mp.Pool() as pool:
        hash_results = pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(lowerCAmelCase_ , max_queue_size=10000 ),
            chunksize=100,
        )
        for result in hash_results:
            if result is not None:
                yield result
def snake_case_ ( dataset_iterator , jaccard_threshold ):
    """Index every file of the dataset by MinHash and return the duplicate clusters.

    Bug fixes: both parameters were named `lowerCAmelCase_` (a SyntaxError) and
    the DuplicationIndex instance `di` was never bound before use.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def snake_case_ ( code1 , code2 ):
    """Return the Jaccard similarity |A∩B| / |A∪B| of the token sets of two files.

    Bug fix: both token sets were previously bound to the same name
    (`tokensa`), so the function always returned 1.0; it also declared two
    parameters with the same name (a SyntaxError).
    """
    tokens1 = get_tokens(code1 )
    tokens2 = get_tokens(code2 )
    return len(tokens1 & tokens2 ) / len(tokens1 | tokens2 )
# Process-global dataset handle; populated via `global` in find_extremes() so
# multiprocessing workers can read the dataset without pickling it per task.
lowerCamelCase : List[str] = None
def snake_case_ ( cluster , jaccard_threshold ):
    """Reduce one duplicate cluster to its 'extreme' representatives: elements
    that are pairwise below the Jaccard threshold. Each kept element gets a
    'copies' count of how many cluster members it represents.

    Reads file contents from the process-global `_shared_dataset`.
    Bug fixes: duplicate `lowerCAmelCase_` parameters (SyntaxError) and
    clobbered targets for `extremes` and the `copies` initialization.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                # element1 is a near-copy of an already-kept extreme.
                element2["copies"] += 1
                break
        else:
            # Sufficiently different from every kept extreme: keep it.
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def snake_case_ ( cluster_list , dataset , jaccard_threshold ):
    """Compute the extremes of every duplicate cluster in parallel.

    Publishes *dataset* through the module-global `_shared_dataset` so pool
    workers can read it. Bug fixes: three identically-named parameters
    (SyntaxError) and clobbered targets for `_shared_dataset`,
    `extremes_list` and the partial `f`.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def snake_case_ ( dataset , jaccard_threshold = 0.85 ):
    """Remove near-duplicate files from *dataset*, keeping one 'extreme'
    representative per group of near-copies.

    Returns (filtered_dataset, duplicate_clusters); each cluster element is
    annotated with 'is_extreme' and, for extremes, a 'copies' count.
    Bug fixes: duplicate parameter names (SyntaxError), every intermediate
    assignment target lost, and the summary prints interpolated the wrong
    variable into every message.
    """
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    # Drop every duplicate that is not a cluster representative.
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(F"Original dataset size: {len(dataset )}" )
    print(F"Number of duplicate clusters: {len(duplicate_clusters )}" )
    print(F"Files in duplicate cluster: {len(duplicate_indices )}" )
    print(F"Unique files in duplicate cluster: {len(extreme_dict )}" )
    print(F"Filtered dataset size: {len(ds_filter )}" )
    return ds_filter, duplicate_clusters
| 306
|
# Lazy import structure for PoolFormer: submodules are imported on first
# access, and optional (vision / torch) symbols are registered only when the
# corresponding backend is installed.
# Bug fix: `_import_structure` was never defined or populated (the targets were
# clobbered to `lowerCamelCase`) although it is passed to _LazyModule, and the
# final `sys.modules[__name__]` assignment target was lost.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    '''configuration_poolformer''': [
        '''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''PoolFormerConfig''',
        '''PoolFormerOnnxConfig''',
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_poolformer'''] = ['''PoolFormerFeatureExtractor''']
    _import_structure['''image_processing_poolformer'''] = ['''PoolFormerImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_poolformer'''] = [
        '''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''PoolFormerForImageClassification''',
        '''PoolFormerModel''',
        '''PoolFormerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports instead of the lazy proxy.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 306
| 1
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
    '''Holds the DetrImageProcessor settings used by the tests below and computes
    the height/width the processor is expected to resize inputs to.

    NOTE(review): an automated rename collapsed every parameter to `__a` and
    every assignment target to `__lowercase`; the bodies read the intended
    names (size, parent, batch_size, ...), so the class as written does not
    run — verify against the upstream test file.
    '''

    def __init__( self : str , __a : Dict , __a : int=7 , __a : Dict=3 , __a : str=30 , __a : Optional[Any]=400 , __a : Tuple=True , __a : Optional[Any]=None , __a : str=True , __a : Any=1 / 255 , __a : Optional[Any]=True , __a : List[Any]=[0.5, 0.5, 0.5] , __a : Optional[Any]=[0.5, 0.5, 0.5] , __a : List[str]=True , ) -> int:
        """Store the image-processor settings every test case reads back."""
        __lowercase : int = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        __lowercase : Optional[Any] = parent
        __lowercase : Optional[int] = batch_size
        __lowercase : str = num_channels
        __lowercase : Dict = min_resolution
        __lowercase : Dict = max_resolution
        __lowercase : str = do_resize
        __lowercase : Optional[Any] = size
        __lowercase : Union[str, Any] = do_rescale
        __lowercase : Any = rescale_factor
        __lowercase : Tuple = do_normalize
        __lowercase : List[Any] = image_mean
        __lowercase : int = image_std
        __lowercase : Optional[int] = do_pad

    def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
        """Return the settings as the kwargs dict used to build the processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def lowerCAmelCase ( self : int , __a : List[Any] , __a : str=False ) -> List[str]:
        """Compute the (height, width) the processor should output for the given
        inputs; with batched=True the per-image maxima are taken (padding)."""
        if not batched:
            __lowercase : List[str] = image_inputs[0]
            if isinstance(__a , Image.Image ):
                # PIL reports (width, height).
                __lowercase , __lowercase : Dict = image.size
            else:
                # Arrays/tensors are channel-first: dims 1 and 2 are H and W.
                __lowercase , __lowercase : Any = image.shape[1], image.shape[2]
            # Scale the short side to `shortest_edge`, preserving aspect ratio.
            if w < h:
                __lowercase : int = int(self.size["""shortest_edge"""] * h / w )
                __lowercase : Any = self.size["""shortest_edge"""]
            elif w > h:
                __lowercase : str = self.size["""shortest_edge"""]
                __lowercase : List[Any] = int(self.size["""shortest_edge"""] * w / h )
            else:
                __lowercase : Tuple = self.size["""shortest_edge"""]
                __lowercase : Any = self.size["""shortest_edge"""]
        else:
            __lowercase : List[Any] = []
            for image in image_inputs:
                __lowercase , __lowercase : str = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            # Batched outputs are padded to the largest height/width in the batch.
            __lowercase : Tuple = max(__a , key=lambda __a : item[0] )[0]
            __lowercase : Union[str, Any] = max(__a , key=lambda __a : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase ( __a , unittest.TestCase ):
    '''Tests for DetrImageProcessor: configuration properties, batching behavior
    for PIL / numpy / torch inputs, and slow integration checks against COCO
    detection and panoptic annotation fixtures.'''

    # Processor under test; None when the vision backend is unavailable.
    _A : Optional[int] = DetrImageProcessor if is_vision_available() else None

    def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
        """Create a fresh settings helper for each test."""
        __lowercase : Tuple = DetrImageProcessingTester(self )

    @property
    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
        """Kwargs dict used to instantiate the image processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase ( self : Tuple ) -> str:
        """The processor exposes all expected configuration attributes."""
        __lowercase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__a , """image_mean""" ) )
        self.assertTrue(hasattr(__a , """image_std""" ) )
        self.assertTrue(hasattr(__a , """do_normalize""" ) )
        self.assertTrue(hasattr(__a , """do_rescale""" ) )
        self.assertTrue(hasattr(__a , """rescale_factor""" ) )
        self.assertTrue(hasattr(__a , """do_resize""" ) )
        self.assertTrue(hasattr(__a , """size""" ) )
        self.assertTrue(hasattr(__a , """do_pad""" ) )

    def lowerCAmelCase ( self : int ) -> Optional[int]:
        """from_dict honors defaults and kwargs overrides (size/max_size/pad)."""
        __lowercase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
        self.assertEqual(image_processor.do_pad , __a )
        __lowercase : Union[str, Any] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__a )
        self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
        self.assertEqual(image_processor.do_pad , __a )

    def lowerCAmelCase ( self : List[str] ) -> Dict:
        """Intentionally empty override (covered by the common test mixin)."""
        pass

    def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        """PIL inputs: single image and batch produce the expected padded shapes."""
        __lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        __lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , Image.Image )
        # Test not batched input
        __lowercase : Any = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        __lowercase , __lowercase : List[str] = self.image_processor_tester.get_expected_values(__a )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowercase , __lowercase : Dict = self.image_processor_tester.get_expected_values(__a , batched=__a )
        __lowercase : Tuple = image_processing(__a , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
        """numpy inputs: single image and batch produce the expected padded shapes."""
        __lowercase : int = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        __lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , np.ndarray )
        # Test not batched input
        __lowercase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        __lowercase , __lowercase : Union[str, Any] = self.image_processor_tester.get_expected_values(__a )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowercase : Dict = image_processing(__a , return_tensors="""pt""" ).pixel_values
        __lowercase , __lowercase : List[Any] = self.image_processor_tester.get_expected_values(__a , batched=__a )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """torch inputs: single image and batch produce the expected padded shapes."""
        __lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        __lowercase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
        for image in image_inputs:
            self.assertIsInstance(__a , torch.Tensor )
        # Test not batched input
        __lowercase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        __lowercase , __lowercase : Optional[Any] = self.image_processor_tester.get_expected_values(__a )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        __lowercase : int = image_processing(__a , return_tensors="""pt""" ).pixel_values
        __lowercase , __lowercase : Optional[Any] = self.image_processor_tester.get_expected_values(__a , batched=__a )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """Integration: COCO detection annotations convert to the expected target
        tensors; values checked against previously recorded reference outputs."""
        __lowercase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
            __lowercase : List[str] = json.loads(f.read() )
        __lowercase : Dict = {"""image_id""": 39769, """annotations""": target}
        # encode them
        __lowercase : List[str] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
        __lowercase : Optional[int] = image_processing(images=__a , annotations=__a , return_tensors="""pt""" )
        # verify pixel values
        __lowercase : Optional[Any] = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["""pixel_values"""].shape , __a )
        __lowercase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __a , atol=1E-4 ) )
        # verify area
        __lowercase : List[Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __a ) )
        # verify boxes
        __lowercase : str = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __a )
        __lowercase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __a , atol=1E-3 ) )
        # verify image_id
        __lowercase : Tuple = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __a ) )
        # verify is_crowd
        __lowercase : str = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __a ) )
        # verify class_labels
        __lowercase : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __a ) )
        # verify orig_size
        __lowercase : List[str] = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __a ) )
        # verify size
        __lowercase : str = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __a ) )

    @slow
    def lowerCAmelCase ( self : Any ) -> Optional[Any]:
        """Integration: COCO panoptic annotations (including masks) convert to the
        expected target tensors; values checked against reference outputs."""
        __lowercase : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
            __lowercase : Tuple = json.loads(f.read() )
        __lowercase : Any = {"""file_name""": """000000039769.png""", """image_id""": 39769, """segments_info""": target}
        __lowercase : Optional[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
        # encode them
        __lowercase : Optional[int] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
        __lowercase : List[Any] = image_processing(images=__a , annotations=__a , masks_path=__a , return_tensors="""pt""" )
        # verify pixel values
        __lowercase : Any = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["""pixel_values"""].shape , __a )
        __lowercase : List[str] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , __a , atol=1E-4 ) )
        # verify area
        __lowercase : int = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , __a ) )
        # verify boxes
        __lowercase : List[str] = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , __a )
        __lowercase : Optional[int] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , __a , atol=1E-3 ) )
        # verify image_id
        __lowercase : List[Any] = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , __a ) )
        # verify is_crowd
        __lowercase : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , __a ) )
        # verify class_labels
        __lowercase : Optional[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , __a ) )
        # verify masks
        __lowercase : Dict = 822873
        self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , __a )
        # verify orig_size
        __lowercase : int = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , __a ) )
        # verify size
        __lowercase : Dict = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , __a ) )
| 306
|
from __future__ import annotations
def snake_case_ ( lowerCAmelCase_ : int ):
    """Return the prime factorization of the argument in non-decreasing order.

    >>> snake_case_(12)
    [2, 2, 3]

    Bug fix: the original body read `i`, `n` and `factors` without ever
    binding them (the assignment targets were lost), and appended the wrong
    value inside the loop.
    """
    n = lowerCAmelCase_  # work on a local copy of the argument
    i = 2
    factors: list[int] = []
    # Trial division: each factor found is prime because all smaller primes
    # have already been divided out.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        # Whatever remains greater than 1 is itself prime.
        factors.append(n)
    return factors
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 306
| 1
|
# Lazy import structure for MobileViT: submodules are imported on first access,
# and optional (vision / torch / tf) symbols are registered only when the
# corresponding backend is installed.
# Bug fix: `_import_structure` was never defined or populated (the targets were
# clobbered to `lowerCamelCase`) although it is passed to _LazyModule, and the
# final `sys.modules[__name__]` assignment target was lost.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Mapping of submodule name -> public names it exports; consumed by _LazyModule.
_import_structure = {
    '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''feature_extraction_mobilevit'''] = ['''MobileViTFeatureExtractor''']
    _import_structure['''image_processing_mobilevit'''] = ['''MobileViTImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_mobilevit'''] = [
        '''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''MobileViTForImageClassification''',
        '''MobileViTForSemanticSegmentation''',
        '''MobileViTModel''',
        '''MobileViTPreTrainedModel''',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_tf_mobilevit'''] = [
        '''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFMobileViTForImageClassification''',
        '''TFMobileViTForSemanticSegmentation''',
        '''TFMobileViTModel''',
        '''TFMobileViTPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports instead of the lazy proxy.
    from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilevit import MobileViTFeatureExtractor
        from .image_processing_mobilevit import MobileViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilevit import (
            MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileViTForImageClassification,
            MobileViTForSemanticSegmentation,
            MobileViTModel,
            MobileViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilevit import (
            TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileViTForImageClassification,
            TFMobileViTForSemanticSegmentation,
            TFMobileViTModel,
            TFMobileViTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 306
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test for the TF Camembert base model."""

    @slow
    def test_output_embeds_base_model(self):
        """Check shape and a 3x3 value slice of the last hidden state."""
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        # Fixed dtypes: `tf.intaa` / `tf.floataa` do not exist — int32/float32.
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]

        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
| 306
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger. NOTE(review): the scrambled name `lowerCamelCase` is
# immediately shadowed by the next assignment, so the logger is unreachable
# afterwards — originally these were two distinct names (logger + archive map).
lowerCamelCase : int = logging.get_logger(__name__)
# Map of pretrained checkpoint id -> remote config URL (originally the
# LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP constant).
lowerCamelCase : str = {
    '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class lowerCAmelCase ( PretrainedConfig ):
    """Configuration for an LXMERT model (language, vision and cross-modality
    encoder stacks).

    The original signature used duplicate `__a` parameter names (a SyntaxError)
    while the body read the canonical LXMERT names; both are restored here.
    `PretrainedConfig` is imported at the top of this file.
    """

    # Read by the PretrainedConfig machinery.
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # LXMERT has three encoder stacks; expose their depths together.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 306
|
def snake_case_(a: str, b: str) -> bool:
    """Return True iff `a` can be turned into `b` by capitalizing some of its
    lowercase letters and deleting all the remaining lowercase letters.

    Classic "abbreviation" dynamic programme: dp[i][j] is True when the first
    i characters of `a` can produce the first j characters of `b`.

    >>> snake_case_("daBcd", "ABC")
    True
    >>> snake_case_("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True  # empty prefix of `a` produces empty prefix of `b`
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Capitalize a[i] to match b[j].
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                # Or delete a[i] if it is lowercase.
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 306
| 1
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    """Tests for BlenderbotSmallTokenizer.

    Fixes vs. the scrambled original: the mixin base class is named
    (`TokenizerTesterMixin` is imported above), the methods no longer all
    share one name (so unittest actually discovers the `test_*` methods),
    and `setUp` binds the attributes (`special_tokens_map`, `vocab_file`,
    `merges_file`) that the other methods read from `self`.
    """

    # Attribute names read by TokenizerTesterMixin.
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        # NOTE(review): padding/truncation flags were scrambled; False/True
        # matches the upstream test — confirm against transformers.
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 306
|
from scipy.stats import spearmanr
import datasets
# Metric description — referenced below as `_DESCRIPTION` (the scrambled
# assignments used a throwaway name, leaving these constants undefined).
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''

# Argument/return documentation injected into the metric docstring.
_KWARGS_DESCRIPTION = '''
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {\'spearmanr\': -0.7}
    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results[\'spearmanr\'])
        -0.7
        >>> print(round(results[\'spearmanr_pvalue\'], 2))
        0.19
'''

# BibTeX citation block (raw string: keeps the TeX backslashes intact).
_CITATION = r'''\
@book{kokoska2000crc,
  title={CRC standard probability and statistics tables and formulae},
  author={Kokoska, Stephen and Zwillinger, Daniel},
  year={2000},
  publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
             Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
    """Spearman rank-order correlation metric backed by scipy.stats.spearmanr.

    The hook methods must be named `_info` and `_compute` — that is what the
    `datasets.Metric` base class calls; the scrambled original gave both
    methods the same meaningless name.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # spearmanr is symmetric in its two inputs; order follows upstream.
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 306
| 1
|
def is_pentagonal(n: int) -> bool:
    """True iff n is a pentagonal number, i.e. (1 + sqrt(1 + 24n)) / 6 is whole.

    (Inverse of P(k) = k(3k-1)/2.)
    """
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find the smallest difference D = P_j - P_i such that
    both the sum and the difference of the pentagonal pair are pentagonal.

    Returns -1 when no such pair exists among the first `limit` - 1 numbers.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
| 306
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth–Morris–Pratt search: True iff `pattern` occurs in `text`.

    Runs in O(len(pattern) + len(text)). Assumes `pattern` is non-empty
    (pattern[0] is indexed unconditionally, as in the original).
    """
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list:
    """Return the KMP failure array: entry k is the length of the longest
    proper prefix of pattern[: k + 1] that is also a suffix of it."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Self-tests run only when the module is executed as a script.
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)
    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 306
| 1
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    """Tiny linear/batchnorm/linear model used by the offload tests below
    (which instantiate it as `ModelForTest()`)."""

    def __init__(self):
        super().__init__()
        # The original never bound these to `self`, and `nn.BatchNormad`
        # does not exist — restored to the canonical 3->4->5 stack.
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class lowerCAmelCase ( unittest.TestCase ):
    """Unit tests for accelerate's offloading utilities.

    The scrambled original gave all four tests the same method name, so only
    the last definition survived and none were discovered by unittest;
    `torch.floataa`/`torch.bfloataa` are nonexistent dtypes (float16/float32/
    bfloat16 restored — confirm the exact trio against upstream accelerate).
    """

    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 306
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase ( BaseImageProcessor ):
    """CLIP-style image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale and normalization into `pixel_values`.

    The scrambled original had duplicate `__a` parameter names (a SyntaxError)
    and all methods shared one name; canonical parameter/method names are
    restored (`BaseImageProcessor` is imported at the top of this file).
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals size["shortest_edge"], keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level function imported above.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel mean/std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Run the full pipeline over one image or a batch; every step can be
        toggled per call, defaulting to the instance configuration."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""")
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 306
| 1
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
lowerCamelCase : Tuple = logging.getLogger(__name__)
class lowerCAmelCase :
    """Ray actor wrapper that lazily builds and holds a RagRetriever.

    Method names matter: the distributed retriever below invokes them
    remotely as `create_rag_retriever`, `init_retrieval` and `retrieve`
    (see the `.remote(...)` call sites further down this file).
    """

    def __init__(self):
        # Set True once create_rag_retriever has built the retriever.
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            # init_retrieval=False: the index is initialized later via init_retrieval().
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class lowerCAmelCase ( RagRetriever ):
    """RagRetriever that fans retrieval out to Ray actor workers
    (`RagRetriever` is imported at the top of this file; the scrambled
    base-class name `__a` was undefined)."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                """When using Ray for distributed fine-tuning, """
                """you'll need to provide the paths instead, """
                """as the dataset and the index are loaded """
                """separately. More info in examples/rag/use_own_knowledge_dataset.py """)
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            # Build a retriever inside every worker actor.
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info("""initializing retrieval""")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super().get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("""config""", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = """custom"""
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 306
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`.

    (Called as `rreplace` by `upgrade_state_dict` below; the scrambled name
    `snake_case_` left that call unresolved.)
    """
    parts = s.rsplit(old, occurrence)
    return new.join(parts)
def count_parameters(state_dict):
    """Sum all parameter values in `state_dict`, skipping keys under
    `encoder.embeddings` — used as a checksum to compare checkpoints."""
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    """Rename original DALL-E encoder keys to the FlavaImageCodebook layout
    and cast every value to float32."""
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        # e.g. "group_1.xxx" -> "group_1.group.xxx"
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("""res_path.""", """res_path.path.""")
        # ".w"/".b" suffixes become ".weight"/".bias" (last occurrence only).
        if key.endswith(""".w"""):
            key = rreplace(key, """.w""", """.weight""", 1)
        if key.endswith(""".b"""):
            key = rreplace(key, """.b""", """.bias""", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Load a DALL-E encoder checkpoint, remap its weights into a
    FlavaImageCodebook, sanity-check the parameter checksum, and either save
    the converted model or return its state dict.

    (Function name restored: the __main__ guard below calls
    `convert_dalle_checkpoint`.)
    """
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    # The checkpoint may be a whole Encoder module or a bare state dict.
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()

    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()

    # Checksums of both state dicts must agree (up to float tolerance).
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    # CLI entry point: restored `parser`/`args` bindings (the scrambled code
    # assigned the parser to a throwaway name and then used `parser`/`args`).
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 306
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps parameter names from the fairseq wav2vec2-conformer checkpoint to the
# corresponding names in the HF implementation. "*" is a placeholder for the
# encoder-layer index and is substituted at load time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}

# Keys that live at the top level of the HF model, i.e. get no
# "wav2vec2_conformer." prefix when mapped.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """
    Assign ``value`` to the parameter reached by walking ``key`` (dot-separated)
    from ``hf_pointer``, after checking shapes.

    Args:
        hf_pointer: module/parameter to start the attribute walk from.
        key: dot-separated attribute path inside the HF model.
        value: tensor taken from the fairseq checkpoint.
        full_name: original fairseq parameter name (for logging/errors).
        weight_type: which leaf attribute to write ("weight", "bias", ...) or None
            to write ``.data`` of the reached object directly.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Shape check before assignment so mismatches fail loudly with context.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """
    Copy every tensor from the fairseq state dict into the HF model, translating
    parameter names via MAPPING. Tensors with no match are collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # NOTE(review): mapped keys are prefixed with "wav2vec2_conformer." below, so the
    # model attribute is accessed under the same name — confirm against the model class.
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            # Feature-extractor convolutions use positional naming and a dedicated loader.
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Fill in the encoder-layer index taken from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """
    Load one feature-extractor conv-layer tensor. fairseq names them
    ``conv_layers.<layer_id>.<type_id>...`` where type 0 is the convolution and
    type 2 the (layer/group) norm; anything else is recorded as unused.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Norm weights only exist on every layer without group norm, or on layer 0 with it.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Convert a fairseq wav2vec2-conformer checkpoint to the HF format.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory (must exist for the fine-tuned path).
        config_path: optional HF config to start from.
        dict_path: fairseq dictionary of a fine-tuned (CTC) model.
        is_finetuned: whether to build a CTC head (True) or a pre-training model.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # Checkpoints trained with rotary embeddings encode it in the file name.
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Attention mask is only returned when features are layer-normalized.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaConformerForCTC(config)
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: the parser/args objects must be bound to the names used below.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 306
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Module-level logger; the conversion function below logs through this name.
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy weights missing from the new-structure (X)ProphetNet model over from the
    old-structure checkpoint, then save the converted model.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Projections stored as one fused in_proj tensor in the old attention module.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means "skip this path segment").
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # These checks were bare comparison expressions (no-ops); make them real asserts.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                # Slice the fused in_proj tensor into the three separate projections.
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level in both models and keep walking the key.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: the parser/args objects must be bound to the names used below.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
| 1
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    """Build a random benchmark case: 10 ints in [-1000, 1000] and a target in [-5000, 5000]."""
    arr = [randint(-1000, 1000) for _ in range(10)]
    target = randint(-5000, 5000)
    return (arr, target)
# Shared fixture for the timeit benchmarks; referenced by name in solution_times().
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """
    Naive O(n^3) search: try every 3-permutation of ``arr`` and return the first
    triplet (sorted ascending) summing to ``target``, or (0, 0, 0) if none exists.
    """
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """
    Two-pointer O(n^2) search. Sorts ``arr`` in place, then for each anchor ``i``
    scans the remainder with converging left/right pointers. Returns the first
    matching triplet (ascending) or (0, 0, 0) when no triplet sums to ``target``.
    """
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """Benchmark both implementations with timeit and return their best-of-5 times."""
    setup_code = """
from __main__ import dataset, triplet_sum1, triplet_sum2
"""
    test_code_1 = """
triplet_sum1(*dataset)
"""
    test_code_2 = """
triplet_sum2(*dataset)
"""
    # repeat() returns one total per run; min() is the least-noisy estimate.
    times_1 = repeat(setup=setup_code, stmt=test_code_1, repeat=5, number=10000)
    times_2 = repeat(setup=setup_code, stmt=test_code_2, repeat=5, number=10000)
    return (min(times_1), min(times_2))
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # The f-strings below read from `times`, so the result must be bound to that name.
    times = solution_times()
    print(f"The time for naive implementation is {times[0]}.")
    print(f"The time for optimized implementation is {times[1]}.")
| 306
|
def solution(pence: int = 200) -> int:
    """
    Project Euler 31: count the ways to make ``pence`` from British coins
    (1, 2, 5, 10, 20, 50, 100, 200) via bottom-up dynamic programming.
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        # Start at `coin` so each amount can include at least one of this coin.
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    # Quick self-check against the known Project Euler 31 answer for 200 pence.
    expected = 73682
    assert solution(200) == expected
| 1
|
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    """A second lock on the same file must time out while the first is held."""
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
            # Only reached if no Timeout was raised (then pytest.raises fails anyway).
            assert time.time() - _start > timeout
def test_long_path(tmpdir):
    """Overlong lock filenames are truncated to a filesystem-safe length but stay functional."""
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    # 255 is the common max filename length on POSIX filesystems.
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 306
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds configs and inputs for TimmBackbone tests (mirrors other model testers)."""

    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        # Default to the last stage when no explicit indices are requested.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model + backbone + pipeline test suite specialised for TimmBackbone."""

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_and_run_backbone(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 306
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    # The slow (sentencepiece-based) tokenizer is unavailable without sentencepiece.
    NllbTokenizer = None
logger = logging.get_logger(__name__)

# File names expected inside a saved tokenizer directory.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}

# Language codes understood by the NLLB-200 fairseq checkpoints.
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
# fmt: on
class lowerCAmelCase ( __a ):
    '''Fast (``tokenizers``-backed) NLLB tokenizer: layers FAIRSEQ language-code
    handling (source/target language prefix and suffix tokens) on top of the
    base fast tokenizer.

    NOTE(review): throughout this file the ``__lowercase : T = value`` lines
    are machine-obfuscated attribute assignments (originally
    ``self.<attr> = value``); as written they only bind throwaway locals.
    Likewise, the repeated ``__a`` parameter names are obfuscated distinct
    parameters (duplicate argument names are a SyntaxError in real Python).
    Verify against the upstream implementation.
    '''

    _A : Tuple = VOCAB_FILES_NAMES
    _A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _A : Tuple = PRETRAINED_VOCAB_FILES_MAP
    _A : List[str] = ['''input_ids''', '''attention_mask''']
    # Slow-tokenizer counterpart used for conversion.
    _A : Optional[Any] = NllbTokenizer
    # Token-id lists prepended/appended to every encoded sequence; filled in
    # by set_src_lang_special_tokens / set_tgt_lang_special_tokens.
    _A : List[int] = []
    _A : List[int] = []
    def __init__( self : Union[str, Any] , __a : Tuple=None , __a : Any=None , __a : int="<s>" , __a : str="</s>" , __a : Tuple="</s>" , __a : List[Any]="<s>" , __a : Dict="<unk>" , __a : Any="<pad>" , __a : Optional[Any]="<mask>" , __a : int=None , __a : Union[str, Any]=None , __a : List[Any]=None , __a : int=False , **__a : Dict , ) -> Tuple:
        """Initialize the tokenizer; registers the FAIRSEQ language codes as
        additional special tokens and configures the initial source language
        (defaulting to ``eng_Latn``)."""
        # ``mask_token`` is lstrip'ed so '<mask> x' tokenizes like ' x'.
        __lowercase : Optional[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
        __lowercase : Optional[int] = legacy_behaviour
        super().__init__(
            vocab_file=__a , tokenizer_file=__a , bos_token=__a , eos_token=__a , sep_token=__a , cls_token=__a , unk_token=__a , pad_token=__a , mask_token=__a , src_lang=__a , tgt_lang=__a , additional_special_tokens=__a , legacy_behaviour=__a , **__a , )
        __lowercase : Tuple = vocab_file
        # Slow-tokenizer export is only possible when the sentencepiece
        # vocab file is available.
        __lowercase : int = False if not self.vocab_file else True
        __lowercase : Union[str, Any] = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
        # Map every language code to its token id for quick lookup.
        __lowercase : Optional[int] = {
            lang_code: self.convert_tokens_to_ids(__a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        __lowercase : str = src_lang if src_lang is not None else """eng_Latn"""
        __lowercase : Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
        __lowercase : List[str] = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
    @property
    def lowerCAmelCase ( self : int ) -> str:
        """Current source-language code (e.g. ``eng_Latn``)."""
        return self._src_lang
    @src_lang.setter
    def lowerCAmelCase ( self : Any , __a : str ) -> None:
        """Set the source language and refresh the special-token template."""
        __lowercase : List[str] = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def lowerCAmelCase ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
        """Build model inputs by wrapping the ids in the current
        prefix/suffix special tokens."""
        if token_ids_a is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
    def lowerCAmelCase ( self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
        """Return an all-zero token-type-id mask (NLLB does not use
        token types)."""
        __lowercase : List[Any] = [self.sep_token_id]
        __lowercase : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def lowerCAmelCase ( self : List[str] , __a : List[str] , __a : str , __a : Optional[str] , __a : Optional[str] , **__a : Union[str, Any] ) -> Optional[int]:
        """Encode ``raw_inputs`` for generation, attaching the target-language
        id as ``forced_bos_token_id``; both languages are required."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
        __lowercase : str = src_lang
        __lowercase : Optional[int] = self(__a , add_special_tokens=__a , return_tensors=__a , **__a )
        __lowercase : Any = self.convert_tokens_to_ids(__a )
        __lowercase : str = tgt_lang_id
        return inputs
    def lowerCAmelCase ( self : List[str] , __a : List[str] , __a : str = "eng_Latn" , __a : Optional[List[str]] = None , __a : str = "fra_Latn" , **__a : List[Any] , ) -> BatchEncoding:
        """Prepare a seq2seq batch with language defaults eng_Latn→fra_Latn."""
        __lowercase : Union[str, Any] = src_lang
        __lowercase : int = tgt_lang
        return super().prepare_seqaseq_batch(__a , __a , **__a )
    def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
        """Switch to input (source-language) special tokens."""
        return self.set_src_lang_special_tokens(self.src_lang )
    def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
        """Switch to target-language special tokens."""
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def lowerCAmelCase ( self : Dict , __a : Optional[Any] ) -> None:
        """Reset prefix/suffix tokens for the given source language.

        legacy mode: no prefix, suffix = [eos, src_lang_code];
        default mode: prefix = [src_lang_code], suffix = [eos].
        """
        __lowercase : Tuple = self.convert_tokens_to_ids(__a )
        if self.legacy_behaviour:
            __lowercase : Any = []
            __lowercase : Optional[int] = [self.eos_token_id, self.cur_lang_code]
        else:
            __lowercase : str = [self.cur_lang_code]
            __lowercase : int = [self.eos_token_id]
        __lowercase : int = self.convert_ids_to_tokens(self.prefix_tokens )
        __lowercase : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
        # Rebuild the backend post-processor so encoding applies the new
        # language template.
        __lowercase : int = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def lowerCAmelCase ( self : int , __a : str ) -> None:
        """Reset prefix/suffix tokens for the given target language (same
        legacy/default scheme as the source-language variant)."""
        __lowercase : List[str] = self.convert_tokens_to_ids(__a )
        if self.legacy_behaviour:
            __lowercase : Any = []
            __lowercase : str = [self.eos_token_id, self.cur_lang_code]
        else:
            __lowercase : List[Any] = [self.cur_lang_code]
            __lowercase : Union[str, Any] = [self.eos_token_id]
        __lowercase : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
        __lowercase : int = self.convert_ids_to_tokens(self.suffix_tokens )
        __lowercase : Optional[int] = processors.TemplateProcessing(
            single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def lowerCAmelCase ( self : int , __a : str , __a : Optional[str] = None ) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``; raises
        if this fast tokenizer has no slow-tokenizer vocab to save."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
                """tokenizer.""" )
        if not os.path.isdir(__a ):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
            return
        __lowercase : int = os.path.join(
            __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        # Only copy when the target differs from the source file.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
            copyfile(self.vocab_file , __a )
        return (out_vocab_file,)
| 306
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
# Mapping from fairseq wav2vec2-conformer parameter name fragments to the
# corresponding HF Wav2Vec2Conformer module paths; '*' is replaced by the
# encoder layer index during conversion.
lowerCamelCase : str = {
    '''post_extract_proj''': '''feature_projection.projection''',
    '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
    '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
    '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
    '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
    '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
    '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
    '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
    '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
    '''self_attn.rotary_emb''': '''encoder.embed_positions''',
    '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
    '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
    '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
    '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
    '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
    '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
    '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
    '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
    '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
    '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
    '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
    '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
    '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
    '''encoder.layer_norm''': '''encoder.layer_norm''',
    '''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
    '''quantizer.weight_proj''': '''quantizer.weight_proj''',
    '''quantizer.vars''': '''quantizer.codevectors''',
    '''project_q''': '''project_q''',
    '''final_proj''': '''project_hid''',
    '''w2v_encoder.proj''': '''lm_head''',
    '''mask_emb''': '''masked_spec_embed''',
}
# Keys that live at the top level of the HF model, i.e. are NOT nested under
# the ``wav2vec2_conformer.`` submodule prefix.
lowerCamelCase : Optional[Any] = [
    '''lm_head''',
    '''quantizer.weight_proj''',
    '''quantizer.codevectors''',
    '''project_q''',
    '''project_hid''',
]
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
    """Copy one fairseq tensor ``value`` into the HF model attribute at ``key``.

    NOTE(review): obfuscation collapsed five distinct parameters
    (hf_pointer/key/value/full_name/weight_type) into one repeated name
    (duplicate arguments are a SyntaxError), and the ``__lowercase = value``
    branches stand in for ``hf_pointer.<attr>.data = value`` assignments in
    the original conversion script — verify against upstream.
    """
    # Walk the dotted ``key`` path down to the target submodule/parameter.
    for attribute in key.split(""".""" ):
        __lowercase : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
    # Sanity-check the shape before copying data.
    if weight_type is not None:
        __lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
    else:
        __lowercase : Dict = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    # Dispatch on the kind of tensor being copied (weight, norm stats, ...).
    if weight_type == "weight":
        __lowercase : Dict = value
    elif weight_type == "weight_g":
        __lowercase : Union[str, Any] = value
    elif weight_type == "weight_v":
        __lowercase : List[Any] = value
    elif weight_type == "bias":
        __lowercase : int = value
    elif weight_type == "running_mean":
        __lowercase : List[Any] = value
    elif weight_type == "running_var":
        __lowercase : int = value
    elif weight_type == "num_batches_tracked":
        __lowercase : int = value
    elif weight_type == "inv_freq":
        __lowercase : Optional[Any] = value
    else:
        __lowercase : Any = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ):
    """Map every tensor in the fairseq state dict onto the HF model.

    Conv feature-extractor weights are routed through ``load_conv_layer``;
    everything else is matched against ``MAPPING`` and copied via
    ``set_recursively``.  Unmatched tensors are collected and logged.

    NOTE(review): parameters were obfuscated into one repeated name
    (fairseq_model / hf_model / is_headless in the original) — verify upstream.
    """
    __lowercase : str = []
    __lowercase : Any = fairseq_model.state_dict()
    __lowercase : List[str] = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        __lowercase : Optional[Any] = False
        if "conv_layers" in name:
            load_conv_layer(
                lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , )
            __lowercase : List[str] = True
        else:
            for key, mapped_key in MAPPING.items():
                # Top-level keys keep their name; everything else is nested
                # under the wav2vec2_conformer submodule.
                __lowercase : Any = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    __lowercase : Tuple = True
                    if "*" in mapped_key:
                        # Substitute the encoder layer index for the wildcard.
                        __lowercase : List[Any] = name.split(lowerCAmelCase_ )[0].split(""".""" )[-2]
                        __lowercase : Any = mapped_key.replace("""*""" , lowerCAmelCase_ )
                    # Classify the tensor kind so set_recursively targets the
                    # right attribute; pos_bias_u/v are whole parameters.
                    if "pos_bias_u" in name:
                        __lowercase : Any = None
                    elif "pos_bias_v" in name:
                        __lowercase : Tuple = None
                    elif "weight_g" in name:
                        __lowercase : Union[str, Any] = """weight_g"""
                    elif "weight_v" in name:
                        __lowercase : Dict = """weight_v"""
                    elif "bias" in name:
                        __lowercase : Union[str, Any] = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        __lowercase : str = """weight"""
                    elif "running_mean" in name:
                        __lowercase : str = """running_mean"""
                    elif "inv_freq" in name:
                        __lowercase : List[Any] = """inv_freq"""
                    elif "running_var" in name:
                        __lowercase : Any = """running_var"""
                    elif "num_batches_tracked" in name:
                        __lowercase : Any = """num_batches_tracked"""
                    else:
                        __lowercase : Optional[int] = None
                    set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
                continue
        if not is_used:
            unused_weights.append(lowerCAmelCase_ )
    logger.warning(F"Unused weights: {unused_weights}" )
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ):
    """Copy one conv-feature-extractor tensor into the HF feature extractor.

    ``full_name`` looks like ``...conv_layers.<layer_id>.<type_id>...``:
    type_id 0 is the conv itself, type_id 2 is a layer/group norm.

    NOTE(review): as elsewhere, the ``__lowercase = value`` lines are
    obfuscated ``....data = value`` copies — verify upstream.
    """
    __lowercase : List[Any] = full_name.split("""conv_layers.""" )[-1]
    __lowercase : int = name.split(""".""" )
    __lowercase : Optional[Any] = int(items[0] )
    __lowercase : List[str] = int(items[1] )
    if type_id == 0:
        # Convolution weight/bias.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            __lowercase : Union[str, Any] = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            __lowercase : List[str] = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # Normalization weight/bias (group norm only exists on layer 0).
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            __lowercase : Union[str, Any] = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            __lowercase : Dict = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(lowerCAmelCase_ )
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=True ):
    """Convert a fairseq wav2vec2-conformer checkpoint to the HF format.

    Builds the config (optionally from ``config_path``), reconstructs the CTC
    tokenizer/processor from the fairseq dictionary when fine-tuned, loads
    the fairseq model, copies its weights and saves the HF model.

    NOTE(review): parameter names were obfuscated into one repeated name
    (checkpoint_path / pytorch_dump_folder_path / config_path / dict_path /
    is_finetuned in the original) — verify upstream.
    """
    if config_path is not None:
        __lowercase : List[Any] = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase_ , hidden_act="""swish""" )
    else:
        __lowercase : List[Any] = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # Rotary position embeddings for checkpoints trained with RoPE.
        __lowercase : Tuple = """rotary"""
    if is_finetuned:
        if dict_path:
            __lowercase : Any = Dictionary.load(lowerCAmelCase_ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __lowercase : List[Any] = target_dict.pad_index
            __lowercase : Optional[int] = target_dict.bos_index
            __lowercase : List[Any] = target_dict.eos_index
            __lowercase : List[str] = len(target_dict.symbols )
            __lowercase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" )
            if not os.path.isdir(lowerCAmelCase_ ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) )
                return
            os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
            __lowercase : Tuple = target_dict.indices
            # fairseq has the <pad> and <s> switched
            __lowercase : int = 0
            __lowercase : Any = 1
            with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
            __lowercase : Dict = WavaVecaCTCTokenizer(
                lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , )
            __lowercase : List[Any] = True if config.feat_extract_norm == """layer""" else False
            __lowercase : Optional[Any] = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
            __lowercase : Optional[int] = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
            processor.save_pretrained(lowerCAmelCase_ )
        __lowercase : Union[str, Any] = WavaVecaConformerForCTC(lowerCAmelCase_ )
    else:
        __lowercase : Optional[Any] = WavaVecaConformerForPreTraining(lowerCAmelCase_ )
    if is_finetuned:
        __lowercase , __lowercase , __lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        __lowercase : List[Any] = argparse.Namespace(task="""audio_pretraining""" )
        __lowercase : Optional[Any] = fairseq.tasks.setup_task(lowerCAmelCase_ )
        __lowercase , __lowercase , __lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ )
    __lowercase : Dict = model[0].eval()
    recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned )
    hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    # CLI entry point: convert a fairseq wav2vec2-conformer checkpoint to HF.
    # NOTE(review): the parser is bound to ``lowerCamelCase`` but used as
    # ``parser`` / ``args`` below — an obfuscation artifact; verify upstream.
    lowerCamelCase : int = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
    parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    parser.add_argument(
        '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
    )
    lowerCamelCase : Any = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 306
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
# Pretrained Donut checkpoints and their hosted config files.
lowerCamelCase : Any = {
    '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class lowerCAmelCase ( __a ):
    '''Configuration for the Donut-Swin vision encoder (model type
    ``donut-swin``): stores the Swin Transformer hyper-parameters.

    NOTE(review): the ``__lowercase : T = value`` lines in ``__init__`` are
    obfuscated ``self.<attr> = value`` assignments, and the repeated ``__a``
    parameters are obfuscated distinct keyword arguments — verify upstream.
    '''

    _A : Optional[int] = '''donut-swin'''
    # Aliases so generic code can read attention-head / layer counts.
    _A : Dict = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }
    def __init__( self : Union[str, Any] , __a : Optional[Any]=224 , __a : List[Any]=4 , __a : List[str]=3 , __a : List[Any]=96 , __a : Optional[Any]=[2, 2, 6, 2] , __a : Union[str, Any]=[3, 6, 12, 24] , __a : Any=7 , __a : str=4.0 , __a : Dict=True , __a : List[str]=0.0 , __a : Optional[Any]=0.0 , __a : Any=0.1 , __a : List[str]="gelu" , __a : str=False , __a : Tuple=0.02 , __a : List[str]=1E-5 , **__a : Optional[Any] , ) -> Optional[int]:
        """Build the config; defaults mirror the donut-base checkpoint."""
        super().__init__(**__a )
        __lowercase : Dict = image_size
        __lowercase : List[str] = patch_size
        __lowercase : Optional[Any] = num_channels
        __lowercase : List[str] = embed_dim
        __lowercase : Dict = depths
        __lowercase : int = len(__a )
        __lowercase : List[Any] = num_heads
        __lowercase : Tuple = window_size
        __lowercase : Optional[int] = mlp_ratio
        __lowercase : Optional[Any] = qkv_bias
        __lowercase : Union[str, Any] = hidden_dropout_prob
        __lowercase : Any = attention_probs_dropout_prob
        __lowercase : Any = drop_path_rate
        __lowercase : List[str] = hidden_act
        __lowercase : Tuple = use_absolute_embeddings
        __lowercase : Optional[Any] = layer_norm_eps
        __lowercase : Tuple = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        __lowercase : str = int(embed_dim * 2 ** (len(__a ) - 1) )
| 306
|
def snake_case_(string_a: str, string_b: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    The Hamming distance is the number of positions at which the
    corresponding characters differ.

    Args:
        string_a: first string.
        string_b: second string, must have the same length as ``string_a``.

    Returns:
        The number of differing positions.

    Raises:
        ValueError: if the two strings have different lengths.

    Fixes vs. original: the obfuscated version declared both parameters with
    the same name (a SyntaxError), used one name for both loop variables so
    the comparison was always False, and incremented an undefined ``count``.
    """
    if len(string_a) != len(string_b):
        raise ValueError("""String lengths must match!""")
    # sum() over the boolean comparisons counts the mismatching positions.
    return sum(char_a != char_b for char_a, char_b in zip(string_a, string_b))
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 306
| 1
|
import random
def snake_case_(num: int) -> bool:
    """Miller-Rabin probabilistic primality test with 5 random bases.

    Writes ``num - 1`` as ``s * 2**t`` with ``s`` odd, then checks 5 random
    witnesses.  Always returns True for a prime; returns False for a
    composite except with probability at most ``4**-5`` per call.

    Args:
        num: candidate to test.  NOTE(review): callers are expected to filter
            small and even numbers first (the even case ``t == 0`` can loop);
            the low-primes pre-check elsewhere in this file does exactly that.

    Returns:
        True if ``num`` is (very probably) prime.

    Fixes vs. original: the obfuscated version bound ``s`` and ``t`` to the
    same throwaway local and then referenced the undefined names ``s``/``t``
    (NameError); restored the standard working algorithm.
    """
    s = num - 1
    t = 0
    # Factor num - 1 as s * 2**t with s odd.
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        # Three-argument pow() performs fast modular exponentiation.
        v = pow(a, s, num)
        if v != 1:
            i = 0
            # Square repeatedly; for a prime, v must reach num - 1 within
            # t - 1 squarings (Fermat / square-root-of-unity argument).
            while v != (num - 1):
                if i == t - 1:
                    return False
                i += 1
                v = (v ** 2) % num
    return True
def _rabin_miller(num: int) -> bool:
    """Private Miller-Rabin primality check with 5 random bases.

    Only called for odd candidates that survived the low-prime trial
    division below, so the even-number corner case cannot occur.
    """
    s = num - 1
    t = 0
    while s % 2 == 0:
        s //= 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                i += 1
                v = (v ** 2) % num
    return True


# All 168 primes below 1000: used both as a fast membership test and as
# trial divisors before falling back to Miller-Rabin.
_LOW_PRIMES = [
    2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
    67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
    139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
    223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
    293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
    383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
    463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
    569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
    647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
    743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
    839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
    941, 947, 953, 967, 971, 977, 983, 991, 997,
]


def snake_case_(num: int) -> bool:
    """Return True if ``num`` is (very probably) prime.

    Fast path: membership in / divisibility by the primes below 1000;
    otherwise fall back to a 5-round Miller-Rabin test.

    Args:
        num: candidate integer.

    Returns:
        True for (probable) primes, False otherwise (including num < 2).

    Fixes vs. original: the original body called ``rabin_miller``, a name
    that no longer exists after the obfuscating rename of the Miller-Rabin
    routine (NameError for large candidates); the logic is now backed by the
    private ``_rabin_miller`` helper above.
    """
    if num < 2:
        return False
    if num in _LOW_PRIMES:
        return True
    # Cheap trial division eliminates most composites before Miller-Rabin.
    for prime in _LOW_PRIMES:
        if (num % prime) == 0:
            return False
    return _rabin_miller(num)
def snake_case_ ( lowerCAmelCase_ : int = 1024 ):
    """Generate a random (probable) prime of ``keysize`` bits.

    NOTE(review): the loop references ``keysize``/``num`` (obfuscated away
    from the parameter and the local) and calls ``is_prime_low_num``, a name
    that does not exist in this file after the obfuscating rename — as
    written this raises NameError.  Verify against the original source.
    """
    while True:
        # Draw uniformly from the keysize-bit range [2**(k-1), 2**k).
        __lowercase : int = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        if is_prime_low_num(lowerCAmelCase_ ):
            return num
if __name__ == "__main__":
    # Demo: generate one large prime and re-check it.
    # NOTE(review): ``generate_large_prime`` / ``is_prime_low_num`` were
    # renamed by obfuscation and are undefined here — verify upstream.
    lowerCamelCase : List[str] = generate_large_prime()
    print(('''Prime number:''', num))
    print(('''is_prime_low_num:''', is_prime_low_num(num)))
| 306
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case_(x):
    """Return ``x`` unchanged if it is iterable, otherwise the pair ``(x, x)``.

    Used to normalize scalar configuration values (e.g. an image or patch
    size) into 2-tuples.

    Args:
        x: any value; iterables pass through untouched.

    Returns:
        ``x`` itself when iterable, else the tuple ``(x, x)``.

    Fixes vs. original: the obfuscated version named the parameter
    ``lowerCAmelCase_`` but returned the undefined name ``x`` (NameError on
    every call); the parameter is now named ``x`` to match the body.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class lowerCAmelCase :
    '''Shared test mixin for FlaxVisionTextDualEncoderModel: concrete test
    classes supply the vision/text models and inputs, and this mixin checks
    construction, save/load round-trips, attention outputs, and PT<->Flax
    weight-conversion equivalence.

    NOTE(review): as elsewhere in this file, ``__lowercase : T = value``
    lines are obfuscated local assignments and the repeated ``__a``
    parameters stand for distinct arguments — verify against upstream.
    '''

    def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
        """Hook: build (vision_model, text_model) — overridden by subclasses."""
        pass
    def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Hook: prepare configs and inputs — overridden by subclasses."""
        pass
    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """Hook: build a pretrained model plus inputs — overridden by subclasses."""
        pass
    def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]:
        """Assert two arrays agree elementwise within tolerance ``tol``."""
        __lowercase : List[str] = np.abs((a - b) ).max()
        self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." )
    def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
        """Model built from sub-configs produces projection-dim embeddings."""
        __lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : str = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
        """Model assembled from pretrained vision+text parts works end to end."""
        __lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
        __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
        """Outputs survive a save_pretrained / from_pretrained round-trip."""
        __lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
        __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        __lowercase : int = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(__a )
            __lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
            __lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
            __lowercase : int = after_output[0]
            __lowercase : int = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(__a , 1E-3 )
    def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
        """Attention tensors have the expected per-layer shapes."""
        __lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
        __lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : Union[str, Any] = model(
            input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
        __lowercase : Optional[int] = output.vision_model_output.attentions
        self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        __lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
        __lowercase : List[str] = to_atuple(vision_model.config.patch_size )
        __lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __lowercase : int = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        __lowercase : Dict = output.text_model_output.attentions
        self.assertEqual(len(__a ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
        """Check Flax and PyTorch models agree, in both conversion directions."""
        pt_model.to(__a )
        pt_model.eval()
        # prepare inputs
        __lowercase : Union[str, Any] = inputs_dict
        __lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            __lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
            __lowercase : Tuple = fx_model(**__a ).to_tuple()
            self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
            for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
                self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
            # PT -> Flax
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_model.save_pretrained(__a )
                __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
            __lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
            self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
            for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
                self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
            # Flax -> PT
            with tempfile.TemporaryDirectory() as tmpdirname:
                fx_model.save_pretrained(__a )
                __lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
            pt_model_loaded.to(__a )
            pt_model_loaded.eval()
            with torch.no_grad():
                __lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
            self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
            for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
                self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
    def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
        """PT state dict converted to Flax params yields equivalent outputs."""
        __lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : str = VisionTextDualEncoderModel(__a )
        __lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
        __lowercase : Any = fx_state
        self.check_pt_flax_equivalence(__a , __a , __a )
    def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
        """Flax params loaded into a PT model yield equivalent outputs."""
        __lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
        __lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
        self.check_pt_flax_equivalence(__a , __a , __a )
    def lowerCAmelCase ( self : str ) -> Optional[Any]:
        """Test: model built from configs."""
        __lowercase : Optional[Any] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**__a )
    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """Test: model built from pretrained vision/text parts."""
        __lowercase : int = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**__a )
    def lowerCAmelCase ( self : List[Any] ) -> Dict:
        """Test: save/load round-trip."""
        __lowercase : List[str] = self.prepare_config_and_inputs()
        self.check_save_load(**__a )
    def lowerCAmelCase ( self : Any ) -> Dict:
        """Test: attention output shapes."""
        __lowercase : str = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**__a )
    @is_pt_flax_cross_test
    def lowerCAmelCase ( self : List[str] ) -> Tuple:
        """Test: PT<->Flax conversion equivalence in both directions."""
        __lowercase : Optional[Any] = self.prepare_config_and_inputs()
        __lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
        __lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
        __lowercase : Dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(__a , __a , __a )
        self.check_equivalence_flax_to_pt(__a , __a , __a )
    @slow
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Slow test: real pretrained model survives a save/load round-trip."""
        __lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
        __lowercase : Dict = model_a(**__a )
        __lowercase : Any = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(__a )
            __lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
            __lowercase : Optional[int] = model_a(**__a )
            __lowercase : Tuple = after_outputs[0]
            __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(__a , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
    '''Concrete mixin instantiation: Flax ViT vision encoder + Flax BERT text
    encoder for the dual-encoder tests.'''

    def lowerCAmelCase ( self : Dict ) -> Dict:
        """Build a tiny pretrained ViT+BERT dual encoder and random inputs."""
        __lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
        __lowercase : int = 13
        __lowercase : Union[str, Any] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        __lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        __lowercase : Tuple = random_attention_mask([batch_size, 4] )
        __lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict:
        """Instantiate the ViT vision model and BERT text model from configs."""
        __lowercase : int = FlaxViTModel(__a )
        __lowercase : List[Any] = FlaxBertModel(__a )
        return vision_model, text_model
    def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
        """Collect configs and inputs from the ViT and BERT model testers."""
        __lowercase : Tuple = FlaxViTModelTester(self )
        __lowercase : str = FlaxBertModelTester(self )
        __lowercase : List[str] = vit_model_tester.prepare_config_and_inputs()
        __lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
        __lowercase , __lowercase : Optional[int] = vision_config_and_inputs
        __lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
    """CLIP-vision + BERT flavour of the vision/text dual-encoder test mixin.

    NOTE(review): all three methods below were machine-renamed to the same
    identifier and shadow each other; restore `get_pretrained_model_and_inputs`,
    `get_vision_text_model` and `prepare_config_and_inputs` before use.
    """

    def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Load a tiny pretrained CLIP+BERT dual encoder and build matching dummy inputs."""
        # NOTE(review): the original passed the undefined `__a` for both *_from_pt
        # flags; upstream loads both towers from PyTorch checkpoints (True/True).
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def lowerCAmelCase ( self, vision_config, text_config ) -> Any:
        """Build the two towers from their configs.

        NOTE(review): the original declared two parameters both named `__a`
        (a SyntaxError); renamed to the configs they clearly receive.
        """
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def lowerCAmelCase ( self : List[Any] ) -> List[str]:
        """Prepare configs and model inputs from the per-tower model testers."""
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test against the public clip-italian checkpoint."""

    @slow
    def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        """Run a text/image batch through clip-italian and pin shapes + one logit row.

        NOTE(review): the original bound every intermediate to `__lowercase` and
        then read the real names (`processor`, `inputs`, `outputs`, ...), and
        passed the undefined `__a` for `images`/`padding`; restored.
        """
        model = FlaxVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"],
            images=image,
            padding=True,
            return_tensors="np",
        )
        outputs = model(**inputs)
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1E-3))
| 306
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowerCamelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase : Tuple = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)["depth"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline("depth-estimation")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to("cuda")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
... )
>>> pipe = pipe.to("cuda")
>>> img = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/cat.png"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")
>>> prompt = "A robot, 4k photo"
>>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"
>>> generator = torch.Generator(device="cuda").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save("robot_cat.png")
```
'''
def snake_case_ ( height: int, width: int, scale_factor: int = 8 ):
    """Compute the latent-grid-compatible (height, width) for the VQ decoder.

    Divides each dimension by ``scale_factor**2`` (rounding up), then multiplies
    back by ``scale_factor`` — i.e. the pixel size is mapped to the nearest size
    the movq decoder can reproduce.

    NOTE(review): the original signature repeated the parameter name three times
    (a SyntaxError) and incremented `new_height`/`new_width` that were never
    bound (results went to the `__lowercase` placeholder); both restored.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # round up when height is not an exact multiple of scale_factor**2
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class lowerCAmelCase ( __a ):
    """Kandinsky 2.2 controlnet-style decoder pipeline (unet + DDPM scheduler + movq VQ decoder).

    NOTE(review): this file has been machine-renamed. Several signatures repeat
    the parameter name `__a` (duplicate arguments are a SyntaxError), and most
    assignments target the placeholder `__lowercase` while later lines read the
    original variable names — each spot is flagged below and must be restored
    before this class can run.
    """

    def __init__( self : Optional[int] , __a : UNetaDConditionModel , __a : DDPMScheduler , __a : VQModel , ) -> Union[str, Any]:
        """Register the unet/scheduler/movq sub-modules.

        NOTE(review): the three parameters all share the name `__a` (invalid);
        the final assignment presumably cached `self.movq_scale_factor` — confirm
        against the upstream pipeline.
        """
        super().__init__()
        self.register_modules(
            unet=__a , scheduler=__a , movq=__a , )
        # 2**(levels-1): downscale factor implied by the movq block structure
        __lowercase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)

    def lowerCAmelCase ( self : List[Any] , __a : Tuple , __a : Any , __a : int , __a : Optional[Any] , __a : Optional[int] , __a : Union[str, Any] ) -> str:
        """Sample (or validate) initial latents and scale by the scheduler's init sigma.

        NOTE(review): reads `latents`, `shape` and `scheduler`, none of which are
        bound here — parameters were renamed to `__a`; restore
        (shape, dtype, device, generator, latents, scheduler) before use.
        """
        if latents is None:
            __lowercase : int = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            __lowercase : int = latents.to(__a )
        __lowercase : Any = latents * scheduler.init_noise_sigma
        return latents

    def lowerCAmelCase ( self : Optional[int] , __a : Tuple=0 ) -> Optional[Any]:
        """Offload unet and movq to CPU via accelerate's `cpu_offload`.

        NOTE(review): reads `gpu_id` and `models` which are never bound here
        (placeholder assignments) — restore before use.
        """
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        __lowercase : Dict = torch.device(F"cuda:{gpu_id}" )
        __lowercase : Optional[Any] = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(__a , __a )

    def lowerCAmelCase ( self : Optional[int] , __a : int=0 ) -> Optional[int]:
        """Model offload with hooks (accelerate >= 0.17) so modules move lazily.

        NOTE(review): reads `gpu_id` and `hook`, which are never bound here
        (placeholder assignments), and passes the undefined `__a` around —
        restore before use.
        """
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        __lowercase : str = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=__a )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        __lowercase : Optional[Any] = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            __lowercase , __lowercase : Union[str, Any] = cpu_offload_with_hook(__a , __a , prev_module_hook=__a )
        # We'll offload the last model manually.
        __lowercase : Optional[Any] = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
        """Return the device the unet actually executes on (honouring accelerate hooks)."""
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            # NOTE(review): the first hasattr should test `module`, not the
            # undefined `__a` — restore before use.
            if (
                hasattr(__a , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    @replace_example_docstring(__a )
    def __call__( self : Dict , __a : Union[torch.FloatTensor, List[torch.FloatTensor]] , __a : Union[torch.FloatTensor, List[torch.FloatTensor]] , __a : torch.FloatTensor , __a : int = 512 , __a : int = 512 , __a : int = 100 , __a : float = 4.0 , __a : int = 1 , __a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a : Optional[torch.FloatTensor] = None , __a : Optional[str] = "pil" , __a : bool = True , ) -> Any:
        """Denoising loop: classifier-free guidance over (image_embeds, hint), then movq decode.

        NOTE(review): every parameter is named `__a` (SyntaxError) and the body
        reads the original names (image_embeds, negative_image_embeds, hint,
        height, width, num_inference_steps, guidance_scale,
        num_images_per_prompt, generator, latents, output_type, return_dict) —
        restore the signature before this method can execute.
        """
        __lowercase : Any = self._execution_device
        __lowercase : Union[str, Any] = guidance_scale > 1.0
        if isinstance(__a , __a ):
            __lowercase : Optional[int] = torch.cat(__a , dim=0 )
        if isinstance(__a , __a ):
            __lowercase : Optional[int] = torch.cat(__a , dim=0 )
        if isinstance(__a , __a ):
            __lowercase : str = torch.cat(__a , dim=0 )
        __lowercase : Tuple = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            # duplicate conditioning for the unconditional/conditional halves
            __lowercase : Tuple = image_embeds.repeat_interleave(__a , dim=0 )
            __lowercase : Any = negative_image_embeds.repeat_interleave(__a , dim=0 )
            __lowercase : Optional[int] = hint.repeat_interleave(__a , dim=0 )
            __lowercase : Union[str, Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__a )
            __lowercase : Tuple = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=__a )
        self.scheduler.set_timesteps(__a , device=__a )
        __lowercase : str = self.scheduler.timesteps
        __lowercase : Dict = self.movq.config.latent_channels
        __lowercase , __lowercase : List[Any] = downscale_height_and_width(__a , __a , self.movq_scale_factor )
        # create initial latent
        __lowercase : Union[str, Any] = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __a , __a , __a , self.scheduler , )
        for i, t in enumerate(self.progress_bar(__a ) ):
            # expand the latents if we are doing classifier free guidance
            __lowercase : List[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __lowercase : Union[str, Any] = {"""image_embeds""": image_embeds, """hint""": hint}
            __lowercase : Optional[Any] = self.unet(
                sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0]
            if do_classifier_free_guidance:
                # split off the learned variance, apply guidance to the mean only
                __lowercase , __lowercase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
                __lowercase , __lowercase : Tuple = noise_pred.chunk(2 )
                __lowercase , __lowercase : List[Any] = variance_pred.chunk(2 )
                __lowercase : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                __lowercase : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                __lowercase , __lowercase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            __lowercase : Dict = self.scheduler.step(
                __a , __a , __a , generator=__a , )[0]
        # post-processing
        __lowercase : int = self.movq.decode(__a , force_not_quantize=__a )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            # map [-1, 1] model output to [0, 1] and move to HWC numpy
            __lowercase : Union[str, Any] = image * 0.5 + 0.5
            __lowercase : str = image.clamp(0 , 1 )
            __lowercase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            __lowercase : int = self.numpy_to_pil(__a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=__a )
| 306
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 306
| 1
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants for the MRPC example (upstream: MAX_GPU_BATCH_SIZE / EVAL_BATCH_SIZE).
# NOTE(review): both assignments bind the same machine-generated name, so only the
# second value (32) survives — restore distinct constant names.
lowerCamelCase : List[str] = 16
lowerCamelCase : List[str] = 32
def snake_case_ ( accelerator: Accelerator, batch_size: int = 16 ):
    """Build GLUE/MRPC train and eval DataLoaders tokenized with bert-base-cased.

    Args:
        accelerator: the Accelerate object coordinating processes/devices.
        batch_size: per-device batch size for both dataloaders.

    Returns:
        (train_dataloader, eval_dataloader)

    NOTE(review): the original signature repeated `lowerCAmelCase_` twice (a
    SyntaxError) and read `datasets`/`tokenized_datasets` that were never bound
    (results went to the `__lowercase` placeholder); restored. Upstream also
    shuffles only the train split and uses a fixed eval batch size of 32 —
    confirm the eval batch size against the module constant.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
# For testing only
# Swap the real dataloader factory for a mocked one when the test harness asks for it.
# NOTE(review): the replacement is bound to the machine-generated name `lowerCamelCase`
# rather than shadowing the dataloader function — confirm the intended target name.
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    lowerCamelCase : Optional[Any] = mocked_dataloaders # noqa: F811
def snake_case_ ( config, args ):
    """Train bert-base-cased on GLUE MRPC with Accelerate + LocalSGD, then evaluate.

    Args:
        config: dict with "lr", "num_epochs", "seed", "batch_size".
        args: parsed CLI args (cpu, mixed_precision, gradient_accumulation_steps,
            local_sgd_steps).

    NOTE(review): the original signature repeated `lowerCAmelCase_` twice (a
    SyntaxError) and every intermediate was bound to the `__lowercase`
    placeholder while later lines read the real names; restored from the
    visible call/keyword structure. `get_dataloaders` is defined above under a
    machine-generated name — confirm the helper's name before running.
    """
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("""LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)""")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    metric = evaluate.load("""glue""", """mrpc""")
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs),
    )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric)
def snake_case_ ( ):
    """CLI entry point: parse args, build the hyper-parameter config, launch training.

    NOTE(review): the original built the parser into the `__lowercase` placeholder
    and then called methods on the unbound name `parser`, passed the undefined
    `lowerCAmelCase_` as argparse `type`/`default`, and finally called
    `training_function`, which this file defines under a machine-generated name —
    restore the helper's name before running.
    """
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""", type=str, default=None, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""", )
    # New Code #
    parser.add_argument(
        """--gradient_accumulation_steps""", type=int, default=1, help="""The number of minibatches to be ran before gradients are accumulated.""", )
    parser.add_argument(
        """--local_sgd_steps""", type=int, default=8, help="""Number of local SGD steps or None to disable local SGD""")
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
    # NOTE(review): `main` is not defined anywhere in this module — the CLI entry
    # point above was machine-renamed to `snake_case_`; confirm the intended
    # callable before running this file as a script.
    main()
| 306
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
# Splits source text into identifier-ish tokens: any character that is not a
# letter, digit or underscore acts as a separator.
lowerCamelCase : str = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
# NOTE(review): downstream code reads MIN_NUM_TOKENS (10) and NUM_PERM (256), but
# these assignments bind machine-generated names — restore the constant names.
lowerCamelCase : Union[str, Any] = 10
lowerCamelCase : List[str] = 2_56
def snake_case_ ( lowerCAmelCase_ : List[str] ):
    """Compute a MinHash signature over the *set* of tokens, or None for short inputs.

    NOTE(review): the original passed the token list itself as ``num_perm`` and
    bound the hash object to the `__lowercase` placeholder while calling
    ``min_hash.update`` — restored to use the module NUM_PERM constant.
    """
    if len(lowerCAmelCase_) < MIN_NUM_TOKENS:
        # too few tokens to produce a meaningful signature
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(lowerCAmelCase_):
        min_hash.update(token.encode())
    return min_hash
def snake_case_ ( lowerCAmelCase_ : str ):
    """Return the set of non-empty tokens obtained by splitting on NON_ALPHA."""
    tokens = set()
    for piece in NON_ALPHA.split(lowerCAmelCase_):
        if len(piece.strip()) > 0:
            tokens.add(piece)
    return tokens
class lowerCAmelCase :
    """Incremental MinHash-LSH duplicate index: insert items one by one and group
    near-duplicates into clusters keyed by the first item seen.

    NOTE(review): the three methods below were machine-renamed to the same
    identifier and shadow each other; restore `add`, `get_duplicate_clusters`
    and `save_to_file` before the call sites in this file can work.
    """

    def __init__( self : List[str] , *,
     duplication_jaccard_threshold : float = 0.85 , ) -> None:
        """Create the LSH index.

        NOTE(review): the keyword-only parameter was renamed to `__a`, but the
        visible caller passes `duplication_jaccard_threshold=` — restored to
        match. All `self._*` attributes were unbound (placeholder assignments).
        """
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm)
        # cluster base key -> set of near-duplicate keys
        self._duplicate_clusters = defaultdict(set)

    def lowerCAmelCase ( self : str , code_key : Tuple , min_hash : MinHash ) -> None:
        """Insert (code_key, min_hash); attach near-duplicates to an existing cluster
        or start a new one rooted at the first close match."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}")
            return
        self._index.insert(code_key , min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # no existing cluster matched: root a new one at the first close match
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def lowerCAmelCase ( self : Union[str, Any] ) -> List[List[Dict]]:
        """Return clusters as lists of dicts with base_index / repo_name / path."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def lowerCAmelCase ( self : Any , filepath : str ) -> None:
        """Dump the clusters to `filepath` as JSON.

        NOTE(review): calls `self.get_duplicate_clusters()`, a name this class
        does not currently define (see the class note) — confirm once methods
        are renamed. The original annotated the path parameter as `int`.
        """
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""") as f:
            json.dump(duplicate_clusters , f)
def snake_case_ ( lowerCAmelCase_ : str ):
    """Worker: map an (index, row) pair to ((index, repo_name, path), MinHash).

    Returns None implicitly when the row is too short to hash.

    NOTE(review): the original unpacked into the `__lowercase` placeholder and
    then read `index`/`data`/`min_hash` — restored. `get_min_hash` exists in this
    file only under a machine-generated name; confirm once names are restored.
    """
    index, data = lowerCAmelCase_
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def snake_case_ ( lowerCAmelCase_ : Type[Dataset] ):
    """Yield MinHash results computed in parallel over the dataset rows,
    skipping rows for which no hash was produced."""
    feeder = ThreadedIterator(lowerCAmelCase_, max_queue_size=10000)
    with mp.Pool() as pool:
        results = pool.imap_unordered(_compute_min_hash, feeder, chunksize=100)
        yield from (item for item in results if item is not None)
def snake_case_ ( dataset_iterator: Type[Dataset], jaccard_threshold: float ):
    """Stream the dataset through the MinHash workers and collect duplicate clusters.

    NOTE(review): the original signature repeated `lowerCAmelCase_` twice (a
    SyntaxError) and read `di` which was never bound; restored. `minhash_iter`
    and the index's `add`/`get_duplicate_clusters` exist in this file only under
    machine-generated names — confirm once names are restored.
    """
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def snake_case_ ( code1: str, code2: str ):
    """Jaccard similarity between the token sets of two code strings.

    NOTE(review): the original signature repeated `lowerCAmelCase_` twice (a
    SyntaxError) and both token sets were renamed to `tokensa`, so it compared a
    set with itself (always 1.0); restored to compare the two inputs.
    `get_tokens` exists in this file only under a machine-generated name.
    """
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
# Module-level dataset handle shared with multiprocessing workers; populated via
# `global` by the extremes-finding step before the pool forks.
# NOTE(review): workers read `_shared_dataset`, but this binds a machine-generated
# name — restore the constant's name.
lowerCamelCase : List[str] = None
def snake_case_ ( cluster: List[str], jaccard_threshold: List[Any] ):
    """Reduce a duplicate cluster to its 'extremes': a minimal subset such that every
    member is within `jaccard_threshold` of some extreme; each extreme counts its copies.

    NOTE(review): the original signature repeated `lowerCAmelCase_` twice (a
    SyntaxError) and read `extremes`/`codea`-style names never bound
    (placeholder assignments); restored. Reads the process-global
    `_shared_dataset`, which this file binds under a machine-generated name.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            # no existing extreme is close enough: element1 becomes a new extreme
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def snake_case_ ( cluster_list, dataset, jaccard_threshold ):
    """Run the per-cluster extremes reduction in parallel over all clusters.

    Publishes `dataset` through the process-global `_shared_dataset` so forked
    workers can read it without pickling the whole dataset per task.

    NOTE(review): the original signature repeated `lowerCAmelCase_` three times
    (a SyntaxError) and read `dataset`/`extremes_list`/the partial function,
    none of which were bound; restored. `_find_cluster_extremes_shared` and the
    `_shared_dataset` global exist in this file only under machine-generated
    names — confirm once names are restored.
    """
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    worker = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(worker, cluster_list), total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def snake_case_ ( dataset: Type[Dataset], jaccard_threshold: float = 0.85 ):
    """Deduplicate a dataset: cluster near-duplicates, keep each cluster's extremes,
    filter the rest, and annotate clusters with `is_extreme`/`copies`.

    Returns:
        (filtered_dataset, duplicate_clusters)

    NOTE(review): the original signature repeated `lowerCAmelCase_` twice (a
    SyntaxError) and virtually every intermediate was unbound (placeholder
    assignments); restored. `make_duplicate_clusters`/`find_extremes` exist in
    this file only under machine-generated names — confirm once names are
    restored.
    """
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(F"Original dataset size: {len(dataset)}")
    print(F"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(F"Files in duplicate cluster: {len(duplicate_indices)}")
    print(F"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(F"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 306
| 1
|
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy ``weight`` (and optionally ``bias``) tensors into one torch layer.

    Args:
        torch_layer: module with ``.weight`` (and ``.bias`` if ``bias`` given).
        weight (torch.Tensor): new weight; shape must match the layer's.
        bias (torch.Tensor | None): new bias; shape must match when provided.

    Raises:
        AssertionError: on any shape mismatch.
    """
    # set parameter of one layer
    # Names restored: the mangled original had three parameters with the same
    # name and never bound `torch_layer` / `weight` / `bias`.
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Copy trax LSH-attention weights into a HF Reformer attention module.

    Args:
        weights: trax weight tuple — [0] query_key, [1] value, [2] output dense.
            Assumed per-head layout that flattens to ``hidden_size`` — TODO confirm
            against the trax checkpoint format.
        torch_layer: Reformer LSH attention module to fill.
        hidden_size (int): model hidden size used to flatten head dimensions.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Copy trax local-attention weights into a HF Reformer attention module.

    Args:
        weights: trax weight tuple — [0] query, [1] key, [2] value, [3] output dense.
        torch_layer: Reformer local attention module to fill.
        hidden_size (int): model hidden size used to flatten head dimensions.
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Copy one trax Reformer block (attention + feed-forward) into ``torch_block``.

    Args:
        weights: nested trax weight structure for one block.
        torch_block: HF Reformer layer to fill.
        hidden_size (int): model hidden size, forwarded to the attention setters.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention stores 3 tensors, local attention 4 — dispatch on length.
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy a full trax Reformer checkpoint into a ``ReformerModelWithLMHead``.

    Args:
        weights: top-level trax weight structure (embeddings, blocks, head).
        torch_model: HF ``ReformerModelWithLMHead`` instance to fill.
        hidden_size (int): model hidden size, forwarded to the block setter.

    Raises:
        AssertionError: if layer counts or embedding shapes do not match.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    # Axial position embeddings come as a tuple of per-axis weight lists.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    # trax stores 4 weight groups per layer, hence the factor of 4.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a Reformer LM from ``config_file``, load trax weights, save a PyTorch checkpoint.

    Args:
        trax_model_pkl_path (str): path to the pickled trax checkpoint.
        config_file (str): path to the Reformer config JSON.
        pytorch_dump_path (str): where to write the converted ``state_dict``.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    # NOTE(review): pickle.load executes arbitrary code from the file —
    # only convert checkpoints obtained from a trusted source.
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse paths and run the trax -> PyTorch conversion.
    # Names restored: the mangled original assigned the parser to an unused
    # variable, so `parser` and `args` were never bound.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 306
|
from ...processing_utils import ProcessorMixin
class lowerCAmelCase ( ProcessorMixin ):
    """TVLT processor: wraps a TVLT image processor and a TVLT feature extractor.

    Restored from the obfuscated original: the base class is the
    ``ProcessorMixin`` imported above, and the three class attributes below are
    the names ``ProcessorMixin`` reads to wire up its sub-processors.
    """

    # Sub-processor wiring consumed by ProcessorMixin.
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        """Register both sub-processors (mixin) and keep direct references."""
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        """Run the image processor on `images`/`images_mixed` and the feature
        extractor on `audio`, merging every resulting dict into one.

        Returns:
            dict: union of the sub-processors' outputs.

        Raises:
            ValueError: if neither ``images`` nor ``audio`` is provided.
        """
        # Restored: the mangled original had duplicate `__a` parameters (a
        # SyntaxError) and never bound `images_dict` / `images_mixed_dict` /
        # `audio_dict` / `output_dict`.
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        """Union of both sub-processors' input names, order-preserving, de-duplicated."""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 306
| 1
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    """Find two indices in a SORTED list whose values sum to ``target``.

    Args:
        nums: list of ints sorted in non-decreasing order.
        target: desired pair sum.

    Returns:
        ``[i, j]`` with ``i < j`` and ``nums[i] + nums[j] == target``,
        or ``[]`` when no such pair exists (including empty input).
    """
    # Names restored: the mangled original had duplicate parameter names and
    # never bound `i` / `j`, which the loop below reads.
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
    # Run any doctests, then demo the two-pointer pair search.
    import doctest
    doctest.testmod()
    # NOTE(review): `two_pointer` is the intended public name; the definition
    # above was mangled to `snake_case_`, so this call fails until restored.
    print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 306
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    """Builds tiny ESM configs/inputs and runs shape checks for the unit tests.

    Class and member names restored from the obfuscated original: the test
    class below instantiates ``EsmModelTester(self)`` and calls
    ``prepare_config_and_inputs`` / ``create_and_check_*`` by name, and the
    mangled ``__init__`` never actually set any ``self.*`` attribute.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Store every knob verbatim; the methods below read them as attributes.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Tiny EsmConfig mirroring this tester's hyper-parameters."""
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Forward pass with and without attention mask; check output shapes."""
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Masked-LM head must emit vocab-sized logits per position."""
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head must emit num_labels logits per position."""
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test mixin: (config, {"input_ids", "attention_mask"})."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-suite tests for tiny ESM models built by ``EsmModelTester``.

    Restored from the obfuscated original: the two mixin bases come from the
    imports above, the class attributes were all mangled to ``_A`` (shadowing
    each other) and the method names to ``lowerCAmelCase``.
    """

    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # NOTE(review): attribute name inferred (original mangled to `_A`); confirm upstream.
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        """Build the shared model tester and config tester."""
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """Run the model check for every supported position-embedding type."""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Padding tokens must keep ``padding_idx`` as their position id."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Position ids derived from inputs_embeds start right after padding_idx."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):  # NOTE(review): name inferred from skip reason; confirm upstream
        pass
@require_torch
class lowerCAmelCase ( TestCasePlus ):
    """Slow integration tests against the public facebook/esm2_t6_8M_UR50D checkpoint.

    Base class and local names restored: the mangled original inherited from an
    undefined ``__a`` and read ``input_ids`` / ``output`` without binding them.
    """

    @slow
    def test_inference_masked_lm(self):
        """Masked-LM logits must match the recorded reference slice."""
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        """Base-model hidden states must match the recorded reference slice."""
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 306
| 1
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return ``price`` with ``tax_rate`` applied (e.g. 0.25 for 25% tax).

    Names restored: the mangled original had duplicate parameter names and
    read `price` / `tax_rate`, which were never bound.
    """
    return price * (1 + tax_rate)
if __name__ == "__main__":
    # Demo: print tax-inclusive prices for two sample inputs.
    # NOTE(review): relies on `price_plus_tax` being defined above; the
    # definition was mangled to `snake_case_`, so restore it before running.
    print(f'''{price_plus_tax(1_00, 0.25) = }''')
    print(f'''{price_plus_tax(1_25.50, 0.05) = }''')
| 306
|
def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is a pentagonal number.

    n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer
    (inverse of P(k) = k(3k - 1)/2).
    """
    # Name restored: the mangled original read `root` without binding it.
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 5000) -> int:
    """Project Euler 44: find pentagonal numbers P_i, P_j whose sum and
    difference are both pentagonal, and return the (first-found) difference.

    Args:
        limit: how many pentagonal numbers to generate and search.

    Returns:
        The difference ``P_j - P_i`` for the first qualifying pair, or -1 if
        no pair is found within ``limit``.
    """
    # Names restored: the mangled original never bound `pentagonal_nums` or
    # the candidate sum/difference it tested.
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
    # Print the Project Euler 44 answer (minimised pentagonal difference).
    print(f'''{solution() = }''')
| 306
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make torch fully deterministic so the pixel-level assertions below are reproducible.
enable_full_determinism()
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
    """Fast (CPU-sized) tests for the StableUnCLIP image-to-image pipeline.

    NOTE(review): obfuscation damage throughout this class — the three `__a`
    bases (presumably the Pipeline*TesterMixin classes imported above), the
    `_A` attribute names (each assignment shadows the previous), and several
    locals that are read below but never bound (`embedder_hidden_size`,
    `embedder_projection_dim`, `input_image`, `generator`, `sd_pipe`, `inputs`,
    `image`, `image_slice`, `expected_slice`). Restore names against the
    upstream diffusers test before running.
    """

    # Pipeline under test and its standard parameter sets (names mangled to `_A`).
    _A : Dict = StableUnCLIPImgaImgPipeline
    _A : str = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    _A : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    _A : Optional[int] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    _A : Union[str, Any] = frozenset([] )

    def lowerCAmelCase ( self : Any ) -> Dict:
        """Build a dict of tiny, freshly-seeded pipeline components for CPU tests."""
        __lowercase : Dict = 32
        __lowercase : int = embedder_hidden_size
        # image encoding components
        __lowercase : Dict = CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        __lowercase : Tuple = CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=__a , projection_dim=__a , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        __lowercase : Tuple = StableUnCLIPImageNormalizer(embedding_dim=__a )
        __lowercase : Union[str, Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
        torch.manual_seed(0 )
        __lowercase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        torch.manual_seed(0 )
        __lowercase : Union[str, Any] = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=__a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        __lowercase : List[str] = UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__a , layers_per_block=1 , upcast_attention=__a , use_linear_projection=__a , )
        torch.manual_seed(0 )
        __lowercase : int = DDIMScheduler(
            beta_schedule="""scaled_linear""" , beta_start=0.00085 , beta_end=0.012 , prediction_type="""v_prediction""" , set_alpha_to_one=__a , steps_offset=1 , )
        torch.manual_seed(0 )
        __lowercase : Union[str, Any] = AutoencoderKL()
        __lowercase : Tuple = {
            # image encoding components
            """feature_extractor""": feature_extractor,
            """image_encoder""": image_encoder.eval(),
            # image noising components
            """image_normalizer""": image_normalizer.eval(),
            """image_noising_scheduler""": image_noising_scheduler,
            # regular denoising components
            """tokenizer""": tokenizer,
            """text_encoder""": text_encoder.eval(),
            """unet""": unet.eval(),
            """scheduler""": scheduler,
            """vae""": vae.eval(),
        }
        return components

    def lowerCAmelCase ( self : List[Any] , __a : int , __a : List[str]=0 , __a : Union[str, Any]=True ) -> Union[str, Any]:
        """Build dummy img2img call kwargs.

        NOTE(review): the three parameters are all named `__a` (duplicate
        argument names — a SyntaxError); upstream they are
        (device, seed=0, pil_image=True). `generator` and `input_image` are
        likewise read below but never bound.
        """
        if str(__a ).startswith("""mps""" ):
            __lowercase : Optional[int] = torch.manual_seed(__a )
        else:
            __lowercase : List[Any] = torch.Generator(device=__a ).manual_seed(__a )
        __lowercase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        if pil_image:
            # Map [-1, 1] tensors to a [0, 1] PIL image.
            __lowercase : Optional[Any] = input_image * 0.5 + 0.5
            __lowercase : List[str] = input_image.clamp(0 , 1 )
            __lowercase : Any = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            __lowercase : Optional[Any] = DiffusionPipeline.numpy_to_pil(__a )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }

    @skip_mps
    def lowerCAmelCase ( self : str ) -> int:
        """End-to-end CPU run with `image_embeds=None`; checks a 3x3 output slice."""
        __lowercase : Optional[int] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __lowercase : int = self.get_dummy_components()
        __lowercase : int = StableUnCLIPImgaImgPipeline(**__a )
        __lowercase : Optional[int] = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        __lowercase : Optional[Any] = self.get_dummy_inputs(__a )
        inputs.update({"""image_embeds""": None} )
        __lowercase : Union[str, Any] = sd_pipe(**__a ).images
        __lowercase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        __lowercase : Union[str, Any] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """Attention slicing must not change outputs (exact match only off-GPU)."""
        __lowercase : str = torch_device in ["""cpu""", """mps"""]
        self._test_attention_slicing_forward_pass(test_max_difference=__a )

    def lowerCAmelCase ( self : Dict ) -> Tuple:
        """Batched and single inference must agree (exact match only off-GPU)."""
        __lowercase : Union[str, Any] = torch_device in ["""cpu""", """mps"""]
        self._test_inference_batch_single_identical(test_max_difference=__a )

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        """xFormers attention must not change outputs."""
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__a )
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    """Slow GPU integration tests for StableUnCLIP img2img against hub checkpoints.

    NOTE(review): obfuscation damage — every method is named `lowerCAmelCase`
    (only the last survives on the class) and locals read below (`pipe`,
    `output`, `image`, `mem_bytes`, the expected-image arrays) are assigned to
    `__lowercase` and never bound. Restore names before running.
    """

    def lowerCAmelCase ( self : Dict ) -> Tuple:
        """tearDown: free GPU memory between tests (super().tearDown() shows intent)."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase ( self : Optional[int] ) -> int:
        """2-1-l img2img checkpoint: compare output against the stored reference image."""
        __lowercase : Any = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        __lowercase : Dict = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
        __lowercase : Any = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __lowercase : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __lowercase : List[Any] = pipe(__a , """anime turle""" , generator=__a , output_type="""np""" )
        __lowercase : Union[str, Any] = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__a , __a )

    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """2-1-h img2img checkpoint: compare output against the stored reference image."""
        __lowercase : Optional[int] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        __lowercase : int = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
        __lowercase : Dict = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __lowercase : str = torch.Generator(device="""cpu""" ).manual_seed(0 )
        __lowercase : Union[str, Any] = pipe(__a , """anime turle""" , generator=__a , output_type="""np""" )
        __lowercase : Any = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__a , __a )

    def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
        """Peak-memory check: a fully offloaded run must stay under 7 GB."""
        __lowercase : List[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __lowercase : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
            """fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
        __lowercase : List[Any] = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        __lowercase : Dict = pipe(
            __a , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
        __lowercase : Dict = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 306
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[Any] = (DPMSolverSDEScheduler,)
_A : Dict = 10
def lowerCAmelCase ( self : Optional[int] , **__a : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**__a )
return config
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
    def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
        """Full denoising loop (epsilon prediction); checks sum/mean per device.

        NOTE(review): every assignment target below was mangled to `__lowercase`,
        so the names the later lines read (`scheduler_class`, `scheduler`,
        `model`, `sample`, `output`, `result_sum`, `result_mean`) are never
        bound — restore the original variable names before running.
        """
        __lowercase : Optional[int] = self.scheduler_classes[0]
        __lowercase : List[str] = self.get_scheduler_config()
        __lowercase : Any = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps )
        __lowercase : Optional[Any] = self.dummy_model()
        __lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
        __lowercase : Optional[Any] = sample.to(__a )
        for i, t in enumerate(scheduler.timesteps ):
            __lowercase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
            __lowercase : Optional[Any] = model(__a , __a )
            __lowercase : Optional[Any] = scheduler.step(__a , __a , __a )
            __lowercase : str = output.prev_sample
        __lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
        __lowercase : Union[str, Any] = torch.mean(torch.abs(__a ) )
        # Per-device reference statistics (SDE sampling differs slightly by backend).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
            assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
            assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
            assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
    """Run the full denoising loop with ``prediction_type="v_prediction"``.

    Bug fix: restored the intended local variable names — the original body
    assigned everything to ``__lowercase`` and referenced undefined ``__a``,
    raising NameError.  ``torch_device`` is assumed to come from the testing
    utilities at module level — TODO confirm.
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""")
    scheduler = scheduler_class(**scheduler_config)

    scheduler.set_timesteps(self.num_inference_steps)

    model = self.dummy_model()
    sample = self.dummy_sample_deter * scheduler.init_noise_sigma
    sample = sample.to(torch_device)

    for i, t in enumerate(scheduler.timesteps):
        sample = scheduler.scale_model_input(sample, t)
        model_output = model(sample, t)
        output = scheduler.step(model_output, t, sample)
        sample = output.prev_sample

    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))

    if torch_device in ["mps"]:
        assert abs(result_sum.item() - 124.77149200439453) < 1E-2
        assert abs(result_mean.item() - 0.16226289014816284) < 1E-3
    elif torch_device in ["cuda"]:
        assert abs(result_sum.item() - 128.1663360595703) < 1E-2
        assert abs(result_mean.item() - 0.16688326001167297) < 1E-3
    else:
        assert abs(result_sum.item() - 119.8487548828125) < 1E-2
        assert abs(result_mean.item() - 0.1560530662536621) < 1E-3
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
    """Run the full loop with timesteps placed directly on the target device.

    Bug fix: restored the intended local variable names — the original body
    assigned everything to ``__lowercase`` and referenced undefined ``__a``,
    raising NameError.  ``torch_device`` is assumed to come from the testing
    utilities at module level — TODO confirm.
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)

    # Timesteps are created on-device here, unlike the default full-loop test.
    scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

    model = self.dummy_model()
    sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

    for t in scheduler.timesteps:
        sample = scheduler.scale_model_input(sample, t)
        model_output = model(sample, t)
        output = scheduler.step(model_output, t, sample)
        sample = output.prev_sample

    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))

    if torch_device in ["mps"]:
        assert abs(result_sum.item() - 167.46957397460938) < 1E-2
        assert abs(result_mean.item() - 0.21805934607982635) < 1E-3
    elif torch_device in ["cuda"]:
        assert abs(result_sum.item() - 171.59353637695312) < 1E-2
        assert abs(result_mean.item() - 0.22342908382415771) < 1E-3
    else:
        assert abs(result_sum.item() - 162.52383422851562) < 1E-2
        assert abs(result_mean.item() - 0.211619570851326) < 1E-3
def lowerCAmelCase ( self : Tuple ) -> Tuple:
    """Run the full loop with Karras sigma spacing enabled.

    Bug fix: restored the intended local variable names — the original body
    assigned everything to ``__lowercase`` and referenced undefined ``__a``,
    raising NameError.  ``use_karras_sigmas=True`` is the evident intent of
    this variant; ``torch_device`` is assumed to come from the testing
    utilities at module level — TODO confirm both.
    """
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

    scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

    model = self.dummy_model()
    sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
    sample = sample.to(torch_device)

    for t in scheduler.timesteps:
        sample = scheduler.scale_model_input(sample, t)
        model_output = model(sample, t)
        output = scheduler.step(model_output, t, sample)
        sample = output.prev_sample

    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))

    if torch_device in ["mps"]:
        assert abs(result_sum.item() - 176.66974135742188) < 1E-2
        assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
    elif torch_device in ["cuda"]:
        assert abs(result_sum.item() - 177.63653564453125) < 1E-2
        assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
    else:
        assert abs(result_sum.item() - 170.3135223388672) < 1E-2
        assert abs(result_mean.item() - 0.23003872730981811) < 1E-2
| 306
| 1
|
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)

# PyTorch schedulers: only importable when torch is installed; otherwise the
# dummy placeholder objects are pulled in instead.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

# Flax (JAX) schedulers, guarded the same way.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

# Schedulers that additionally require scipy.
try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

# Schedulers that additionally require torchsde.
try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 306
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
# NOTE(review): every assignment target in this section was rewritten to the
# same name `lowerCamelCase`, while later statements still reference the
# originally intended names (TRT_LOGGER, absl_logger, logger, parser, args,
# tokenizer, eval_batch_size, INPUT_SHAPE, STRICT_TYPES, engine_name,
# EXPLICIT_BATCH, network_inputs, input_names, profile, engine).  As written
# the module raises NameError on first use — the intended names should be
# restored before this script can run.
lowerCamelCase : str = trt.Logger(trt.Logger.WARNING)
lowerCamelCase : Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
    '''--onnx_model_path''',
    default=None,
    type=str,
    required=True,
    help='''Path to ONNX model: ''',
)
parser.add_argument(
    '''--output_dir''',
    default=None,
    type=str,
    required=True,
    help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
    '''--tokenizer_name''',
    default='''''',
    type=str,
    required=True,
    help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
    '''--version_2_with_negative''',
    action='''store_true''',
    help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
    '''--null_score_diff_threshold''',
    type=float,
    default=0.0,
    help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
    '''--max_seq_length''',
    default=3_84,
    type=int,
    help=(
        '''The maximum total input sequence length after WordPiece tokenization. Sequences '''
        '''longer than this will be truncated, and sequences shorter than this will be padded.'''
    ),
)
parser.add_argument(
    '''--doc_stride''',
    default=1_28,
    type=int,
    help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
    '''--n_best_size''',
    default=20,
    type=int,
    help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
    '''--max_answer_length''',
    default=30,
    type=int,
    help=(
        '''The maximum length of an answer that can be generated. This is needed because the start '''
        '''and end predictions are not conditioned on one another.'''
    ),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
    '''--dataset_name''',
    type=str,
    default=None,
    required=True,
    help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--dataset_config_name''',
    type=str,
    default=None,
    help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
    '''--fp16''',
    action='''store_true''',
    help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
    '''--int8''',
    action='''store_true''',
    help='''Whether to use INT8''',
)
lowerCamelCase : Dict = parser.parse_args()
# A tokenizer is mandatory: this script never trains one from scratch.
if args.tokenizer_name:
    lowerCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
        '''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
    )
logger.info('''Training/evaluation parameters %s''', args)
lowerCamelCase : List[str] = args.per_device_eval_batch_size
lowerCamelCase : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase : List[str] = True
lowerCamelCase : List[Any] = '''temp_engine/bert-fp32.engine'''
# NOTE(review): argparse only defines --fp16 and --int8, so `args.fpaa` and
# `args.inta` (and `trt.BuilderFlag.FPaa`/`INTa` below) look like corrupted
# spellings of `args.fp16`/`args.int8` and `FP16`/`INT8` — confirm and restore.
if args.fpaa:
    lowerCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine'''
if args.inta:
    lowerCamelCase : int = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
    os.makedirs('''temp_engine''')
lowerCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
# Parse the ONNX model into a TensorRT network and build a serialized engine.
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, '''rb''') as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))
    # Query input names and shapes from parsed TensorRT network
    lowerCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
    lowerCamelCase : Dict = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
    with builder.create_builder_config() as config:
        lowerCamelCase : List[str] = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fpaa:
            config.set_flag(trt.BuilderFlag.FPaa)
        if args.inta:
            config.set_flag(trt.BuilderFlag.INTa)
        lowerCamelCase : Optional[int] = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        # Fixed input shape: min == opt == max == (batch, seq_len).
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        lowerCamelCase : Optional[Any] = builder.build_engine(network, config)
        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, '''wb''') as f:
            f.write(engine.serialize())
def snake_case_ ( inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream ):
    """Copy one batch to the GPU, run TensorRT inference and copy logits back.

    Bug fix: the original signature declared all eight parameters with the
    same name ``lowerCAmelCase_`` — a SyntaxError — while the body already
    referenced the intended names (``inputs``, ``d_inputs``, ``stream`` ...).
    The intended parameter names are restored; the only call site invokes
    this function positionally, so the rename is backward compatible.
    NOTE(review): the call site refers to this function as ``model_infer`` —
    confirm the intended def name.

    Returns:
        ((h_output0, h_output1), infer_time): the host start/end logit
        buffers and the wall-clock inference time in seconds.
    """
    # np.int32 is the dtype the TRT engine bindings expect for BERT inputs
    # (the original dtype token was corrupted to `np.intaa`) — TODO confirm.
    input_ids = np.asarray(inputs["""input_ids"""], dtype=np.int32)
    attention_mask = np.asarray(inputs["""attention_mask"""], dtype=np.int32)
    token_type_ids = np.asarray(inputs["""token_type_ids"""], dtype=np.int32)

    # Copy inputs host -> device asynchronously on the provided CUDA stream.
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    # start time
    start_time = time.time()
    # Run inference: bindings are the device pointers for inputs then outputs.
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# NOTE(review): as in the section above, the `lowerCamelCase` assignment
# targets below were originally named accelerator, raw_datasets, column_names,
# question_column_name, context_column_name, answer_column_name, pad_on_right
# and max_seq_length (the names the following statements actually read).
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
    format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
    datefmt='''%m/%d/%Y %H:%M:%S''',
    level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    lowerCamelCase : List[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
lowerCamelCase : Optional[Any] = raw_datasets['''validation'''].column_names
lowerCamelCase : Union[str, Any] = '''question''' if '''question''' in column_names else column_names[0]
lowerCamelCase : str = '''context''' if '''context''' in column_names else column_names[1]
lowerCamelCase : Dict = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase : Dict = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )
lowerCamelCase : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def snake_case_ ( examples ):
    """Tokenize validation examples into (possibly several) overlapping QA features.

    Bug fix: the original assigned every intermediate to ``__lowercase`` and
    passed undefined names (``lowerCAmelCase_``) while the body read the
    intended names (``examples``, ``tokenized_examples``, ``sample_mapping``
    ...); those names are restored.  ``max_seq_length``, ``tokenizer``,
    ``pad_on_right`` and the ``*_column_name`` globals are defined at module
    level — TODO confirm `max_length=max_seq_length` was the intended value
    for the corrupted ``max_length`` argument.
    NOTE(review): the call site refers to this function as
    ``prepare_validation_features`` — confirm the intended def name.
    """
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="""only_second""" if pad_on_right else """only_first""",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="""max_length""",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["""input_ids"""])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["""id"""][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
        ]
    return tokenized_examples
# NOTE(review): the `lowerCamelCase` targets below were originally named
# eval_examples, eval_dataset, data_collator, eval_dataset_for_model and
# eval_dataloader (the names later statements read); also the map() call
# references `prepare_validation_features`, which is defined above under the
# corrupted name `snake_case_` — confirm and restore.
lowerCamelCase : Tuple = raw_datasets['''validation''']
# Validation Feature Creation
lowerCamelCase : Optional[int] = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)
lowerCamelCase : Union[str, Any] = default_data_collator
# The model never sees example_id / offset_mapping; they are only needed for
# post-processing, so they are dropped from the batch fed to the DataLoader.
lowerCamelCase : Optional[Any] = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
lowerCamelCase : List[str] = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def snake_case_ ( examples, features, predictions, stage="""eval""" ):
    """Match start/end logits back to context substrings and format them for the metric.

    Bug fix: the original declared four parameters all named
    ``lowerCAmelCase_`` (a SyntaxError) and assigned intermediates to
    ``__lowercase``; the intended names — already read by the body
    (``examples``, ``predictions``) — are restored.  Callers invoke this
    positionally, so the rename is backward compatible.
    NOTE(review): the call site refers to this function as
    ``post_processing_function`` — confirm the intended def name.

    Returns:
        EvalPrediction with formatted predictions and gold references.
    """
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
# NOTE(review): this section carries the same corruption as the rest of the
# script: `lowerCamelCase` targets were originally metric, d_inputs,
# h_output0/h_output1, d_output0/d_output1, stream, total_time, niter,
# start_time, all_preds, outputs/infer_time, start_logits/end_logits, logits,
# evalTime, prediction and eval_metric.  In addition, the annotated tuple
# assignments (`lowerCamelCase ,lowerCamelCase : str = ...`) are invalid
# Python syntax, the duplicated `h_outputa`/`d_outputa` arguments collapse
# what were two distinct buffers, and `np.floataa` is presumably np.float32 —
# all of this must be restored before the script can run.
lowerCamelCase : Dict = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inferrence
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    # Size in bytes of one engine binding (shape volume * element size).
    # NOTE(review): the call below names this `binding_nbytes` — confirm the
    # intended def name.
    def snake_case_ ( lowerCAmelCase_ : str ):
        return trt.volume(engine.get_binding_shape(lowerCAmelCase_ ) ) * engine.get_binding_dtype(lowerCAmelCase_ ).itemsize

    # Allocate device memory for inputs and outputs.
    lowerCamelCase : int = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    lowerCamelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
    lowerCamelCase : str = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
    lowerCamelCase : Dict = cuda.mem_alloc(h_outputa.nbytes)
    lowerCamelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    lowerCamelCase : Optional[int] = cuda.Stream()
    # Evaluation
    logger.info('''***** Running Evaluation *****''')
    logger.info(f''' Num examples = {len(eval_dataset)}''')
    logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    lowerCamelCase : int = 0.0
    lowerCamelCase : List[str] = 0
    lowerCamelCase : List[str] = timeit.default_timer()
    lowerCamelCase : List[Any] = None
    for step, batch in enumerate(eval_dataloader):
        lowerCamelCase ,lowerCamelCase : str = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
        total_time += infer_time
        niter += 1
        lowerCamelCase ,lowerCamelCase : Union[str, Any] = outputs
        lowerCamelCase : Optional[Any] = torch.tensor(start_logits)
        lowerCamelCase : List[str] = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        lowerCamelCase : Optional[int] = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : Dict = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-1_00)
        lowerCamelCase : List[Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        lowerCamelCase : Dict = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-1_00)
    if all_preds is not None:
        lowerCamelCase : Tuple = nested_truncate(all_preds, len(eval_dataset))
    lowerCamelCase : Dict = timeit.default_timer() - start_time
    logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
    logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
    logger.info('''Total Number of Inference = %d''', niter)
lowerCamelCase : str = post_processing_function(eval_examples, eval_dataset, all_preds)
lowerCamelCase : Optional[Any] = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 306
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
    '''Tests CLIPProcessor: fixture setup, save/load round-trips, and that the
    processor delegates correctly to its tokenizer and image processor.

    NOTE(review): several fixture assignments below bind to ``__lowercase``
    while later lines read the originally intended names (``self.vocab_file``,
    ``self.merges_file``, ``self.image_processor_file``, ``__a``), so the
    fixtures raise NameError as written — the intended attribute names should
    be restored before these tests can run.
    '''

    def lowerCAmelCase ( self : Optional[Any] ) -> int:
        """Create a temp dir holding a tiny BPE tokenizer and an image-processor config."""
        __lowercase : Tuple = tempfile.mkdtemp()
        # fmt: off
        __lowercase : str = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        # NOTE(review): `__a` is undefined here — presumably the vocab list
        # assigned on the line above; confirm intended variable names.
        __lowercase : str = dict(zip(__a , range(len(__a ) ) ) )
        __lowercase : str = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
        __lowercase : str = {"""unk_token""": """<unk>"""}
        __lowercase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        __lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(__a ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(__a ) )
        __lowercase : Optional[int] = {
            """do_resize""": True,
            """size""": 20,
            """do_center_crop""": True,
            """crop_size""": 18,
            """do_normalize""": True,
            """image_mean""": [0.48145466, 0.4578275, 0.40821073],
            """image_std""": [0.26862954, 0.26130258, 0.27577711],
        }
        __lowercase : List[Any] = os.path.join(self.tmpdirname , __a )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(__a , __a )

    def lowerCAmelCase ( self : List[str] , **__a : Optional[int] ) -> Any:
        """Load the slow CLIPTokenizer from the fixture dir, forwarding kwargs."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **__a )

    def lowerCAmelCase ( self : Any , **__a : Any ) -> str:
        """Load the fast (Rust) CLIPTokenizerFast from the fixture dir, forwarding kwargs."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__a )

    def lowerCAmelCase ( self : Optional[int] , **__a : Any ) -> Tuple:
        """Load the CLIPImageProcessor from the fixture dir, forwarding kwargs."""
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__a )

    def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
        """Remove the temp fixture directory after each test."""
        shutil.rmtree(self.tmpdirname )

    def lowerCAmelCase ( self : Any ) -> Tuple:
        """Return a list with one random 30x400 RGB PIL image."""
        __lowercase : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __lowercase : Dict = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def lowerCAmelCase ( self : List[str] ) -> Any:
        """save_pretrained/from_pretrained round-trips for slow and fast processors."""
        __lowercase : Union[str, Any] = self.get_tokenizer()
        __lowercase : List[Any] = self.get_rust_tokenizer()
        __lowercase : str = self.get_image_processor()

        __lowercase : Tuple = CLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_slow.save_pretrained(self.tmpdirname )
        __lowercase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )

        __lowercase : Optional[int] = CLIPProcessor(tokenizer=__a , image_processor=__a )
        processor_fast.save_pretrained(self.tmpdirname )
        __lowercase : str = CLIPProcessor.from_pretrained(self.tmpdirname )

        # Reloaded processors must match the originals component-by-component.
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , __a )
        self.assertIsInstance(processor_fast.tokenizer , __a )

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , __a )
        self.assertIsInstance(processor_fast.image_processor , __a )

    def lowerCAmelCase ( self : List[str] ) -> str:
        """from_pretrained with extra kwargs overrides the saved components."""
        __lowercase : Dict = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __lowercase : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        __lowercase : str = self.get_image_processor(do_normalize=__a , padding_value=1.0 )

        __lowercase : Optional[Any] = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__a , padding_value=1.0 )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __a )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , __a )

    def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
        """Processor image path must match calling the image processor directly."""
        __lowercase : Union[str, Any] = self.get_image_processor()
        __lowercase : Optional[int] = self.get_tokenizer()

        __lowercase : List[Any] = CLIPProcessor(tokenizer=__a , image_processor=__a )

        __lowercase : Optional[int] = self.prepare_image_inputs()

        __lowercase : Union[str, Any] = image_processor(__a , return_tensors="""np""" )
        __lowercase : Union[str, Any] = processor(images=__a , return_tensors="""np""" )

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def lowerCAmelCase ( self : str ) -> List[str]:
        """Processor text path must match calling the tokenizer directly."""
        __lowercase : str = self.get_image_processor()
        __lowercase : Optional[Any] = self.get_tokenizer()

        __lowercase : List[str] = CLIPProcessor(tokenizer=__a , image_processor=__a )

        __lowercase : int = """lower newer"""

        __lowercase : Optional[int] = processor(text=__a )

        __lowercase : Dict = tokenizer(__a )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def lowerCAmelCase ( self : List[Any] ) -> Dict:
        """Passing text+images yields all keys; passing nothing must raise."""
        __lowercase : Union[str, Any] = self.get_image_processor()
        __lowercase : str = self.get_tokenizer()

        __lowercase : Union[str, Any] = CLIPProcessor(tokenizer=__a , image_processor=__a )

        __lowercase : Any = """lower newer"""
        __lowercase : str = self.prepare_image_inputs()

        __lowercase : List[Any] = processor(text=__a , images=__a )

        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )

        # test if it raises when no input is passed
        with pytest.raises(__a ):
            processor()

    def lowerCAmelCase ( self : Any ) -> Dict:
        """Processor batch_decode must delegate to the tokenizer's batch_decode."""
        __lowercase : int = self.get_image_processor()
        __lowercase : Any = self.get_tokenizer()

        __lowercase : Union[str, Any] = CLIPProcessor(tokenizer=__a , image_processor=__a )

        __lowercase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        __lowercase : Union[str, Any] = processor.batch_decode(__a )
        __lowercase : int = tokenizer.batch_decode(__a )

        self.assertListEqual(__a , __a )

    def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
        """Output keys of the processor must equal its declared model_input_names."""
        __lowercase : Optional[Any] = self.get_image_processor()
        __lowercase : Any = self.get_tokenizer()

        __lowercase : int = CLIPProcessor(tokenizer=__a , image_processor=__a )

        __lowercase : Optional[int] = """lower newer"""
        __lowercase : List[Any] = self.prepare_image_inputs()

        __lowercase : str = processor(text=__a , images=__a )

        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 306
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (name mangled; conventionally `logger`).
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL (conventionally
# NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP).
lowerCamelCase : str = {
    '''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class lowerCAmelCase(PretrainedConfig):
    """Configuration class for NLLB-MoE models.

    Stores the transformer hyper-parameters (layer counts, hidden sizes,
    dropouts) together with the Mixture-of-Experts routing options (number of
    experts, capacity, router dtype/policy).  Defaults correspond to the
    facebook/nllb-moe-54b checkpoint.

    Fixes: the original ``__init__`` declared every parameter as ``__a``
    (duplicate argument names are a SyntaxError), the base class ``__a`` was
    undefined (should be the imported ``PretrainedConfig``), and the three
    class attributes all collided on the name ``_A``.  Parameter names are
    restored from the upstream signature whose default values match this file
    position-for-position.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        """Store the hyper-parameters and forward the token ids to PretrainedConfig."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        # Router computations may run in reduced precision; restrict to known dtypes.
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 306
| 1
|
from collections.abc import Sequence
def snake_case_(lowerCAmelCase_: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty (not necessarily contiguous)
    subsequences of the input.

    Raises:
        ValueError: if the input is None or empty.

    Fixes: the body referenced the unbound name ``nums`` and passed the whole
    sequence into ``max(...)`` alongside ints (TypeError).  The recurrence keeps
    the best sum seen so far and extends it only with profitable elements.
    """
    if lowerCAmelCase_ is None or not lowerCAmelCase_:
        raise ValueError("Input sequence should not be empty")
    ans = lowerCAmelCase_[0]
    for i in range(1, len(lowerCAmelCase_)):
        num = lowerCAmelCase_[i]
        # Either keep the best so far, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)
    return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user.
    # Fixes: the element count and array were bound to throwaway names (`n` and
    # `array` were unbound), and the print called the undefined name
    # `max_subsequence_sum` instead of the function defined above.
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(snake_case_(array))
| 306
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: maps submodule name -> names it exports.
# Fixes: the bottom branch referenced `_import_structure`, which was never
# defined (the dict and the optional export lists were bound to throwaway
# names), and the `_LazyModule` instance was discarded instead of being
# installed into sys.modules.
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

# Vision-only symbols are registered only when the vision extras are present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

# Torch-only modelling symbols.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 306
| 1
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowerCAmelCase(AbstractDatasetReader):
    """Dataset reader that builds a datasets Dataset from a pyspark DataFrame.

    Fixes: ``__init__`` declared every parameter as ``__a`` (duplicate argument
    names are a SyntaxError), the attributes read by the reader method
    (``self._load_from_cache_file``, ``self._file_format``, ``self.builder``)
    were never assigned, and the base class ``__a`` was undefined (the file
    imports ``AbstractDatasetReader`` for exactly this role).  Parameter names
    are restored from their use in the body; types/defaults are unchanged.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        """Wire the reader options into the base class and the Spark builder."""
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def lowerCAmelCase(self):
        """Materialize the dataset: streaming view, or download-and-prepare then load."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a rebuild when the cached result must not be reused.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 306
|
from __future__ import annotations
def snake_case_(lowerCAmelCase_: int) -> list[int]:
    """Return the prime factorization of a positive integer, smallest factor
    first, with multiplicity (e.g. 12 -> [2, 2, 3]; 1 -> []).

    Fixes: the body used the unbound name ``n`` (the parameter was mangled)
    and appended the function argument instead of the found factor.
    """
    n = lowerCAmelCase_
    i = 2
    factors = []
    # Trial division up to sqrt(n); dividing out each factor as it is found.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    # Whatever remains above 1 is itself prime.
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 306
| 1
|
def snake_case_(density: float, bulk_modulus: float) -> float:
    """Speed of sound in a fluid: c = sqrt(K / rho).

    Args:
        density: fluid density (must be > 0).
        bulk_modulus: fluid bulk modulus (must be > 0).

    Raises:
        ValueError: if either quantity is non-positive.

    Fixes: both parameters were declared with the same mangled name
    (a SyntaxError) while the body read ``density``/``bulk_modulus``;
    the names are restored from the body's own usage.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 306
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
    '''Integration test for the TF Camembert base checkpoint.

    NOTE(review): local names in the test body were machine-mangled — every
    assignment targets `__lowercase` while later lines read `model`, `output`
    and `expected_slice`, which are therefore unbound, and `tf.intaa` /
    `tf.floataa` look like mangled `tf.int32` / `tf.float32`.  Restore the
    original identifiers before running this test.
    '''
    @slow
    def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """Run jplu/tf-camembert-base on one French sentence and check that
        last_hidden_state has shape (1, 10, 768) and matches a fixed 3x3 slice."""
        __lowercase : Dict = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        # Token ids for the sentence noted in the trailing comment.
        __lowercase : List[str] = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
        __lowercase : Optional[Any] = model(__a )["""last_hidden_state"""]
        __lowercase : Any = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , __a )
        # compare the actual values for a slice.
        __lowercase : Dict = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 306
| 1
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowerCamelCase : Optional[int] = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')
@total_ordering
@dataclass
class lowerCAmelCase :
    '''Dataclass representation of a semantic version "major.minor.patch".

    NOTE(review): the identifiers in this class are machine-mangled: every
    field is `_A` (later declarations shadow earlier ones), several methods
    share the name `lowerCAmelCase`, and tuple-unpacking targets were
    collapsed to `__lowercase` (an annotated tuple target is itself a
    SyntaxError).  Several statements read names that are never bound
    (`other`, `dic`, `field_names`).  The comments below describe the
    apparent intent; restore the original identifiers before relying on it.
    '''
    # Fields — apparently: version_str, description, major, minor, patch.
    _A : str
    _A : Optional[str] = None
    _A : Optional[Union[str, int]] = None
    _A : Optional[Union[str, int]] = None
    _A : Optional[Union[str, int]] = None
    def lowerCAmelCase ( self : Any ) -> Tuple:
        """Post-init hook: parse version_str into (major, minor, patch)."""
        __lowercase , __lowercase , __lowercase : str = _str_to_version_tuple(self.version_str )
    def __repr__( self : Optional[int] ) -> Tuple:
        """Render as "major.minor.patch" using the tuple property."""
        return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"
    @property
    def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
        """The (major, minor, patch) tuple — presumably named `tuple` originally."""
        return self.major, self.minor, self.patch
    def lowerCAmelCase ( self : Optional[Any] , __a : str ) -> Tuple:
        """Coerce the operand into a Version for comparison — presumably `_validate_operand`."""
        if isinstance(__a , __a ):
            return Version(__a )
        elif isinstance(__a , __a ):
            return other
        raise TypeError(F"{other} (type {type(__a )}) cannot be compared to version." )
    def __eq__( self : Optional[Any] , __a : int ) -> Any:
        """Versions compare equal when their version tuples match; incomparable operands are unequal."""
        try:
            __lowercase : List[str] = self._validate_operand(__a )
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__( self : int , __a : List[Any] ) -> Any:
        """Order by the version tuple (total_ordering derives the remaining comparisons)."""
        __lowercase : List[str] = self._validate_operand(__a )
        return self.tuple < other.tuple
    def __hash__( self : Optional[Any] ) -> Union[str, Any]:
        """Hash the canonical string form so equal versions hash equally."""
        return hash(_version_tuple_to_str(self.tuple ) )
    @classmethod
    def lowerCAmelCase ( cls : Optional[int] , __a : Union[str, Any] ) -> int:
        """Alternate constructor from a dict, ignoring unknown keys — presumably `from_dict`."""
        __lowercase : Optional[Any] = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )
    def lowerCAmelCase ( self : List[Any] ) -> str:
        """Serialize back to the raw version string."""
        return self.version_str
def snake_case_(lowerCAmelCase_):
    """Parse a version string 'x.y.z' into a tuple of three ints.

    Raises:
        ValueError: if the string is not exactly three dot-separated digit fields.

    Fixes: the original referenced the unbound names ``_VERSION_REG`` and
    ``version_str`` and applied ``int()`` to the whole argument instead of each
    captured field.  The pattern is inlined so the function is self-contained.
    """
    res = re.match(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$", lowerCAmelCase_)
    if not res:
        raise ValueError(
            f"Invalid version '{lowerCAmelCase_}'. Format should be x.y.z with {{x,y,z}} being digits."
        )
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])
def snake_case_(lowerCAmelCase_):
    """Render a version tuple (or any iterable of components) as 'x.y.z'.

    Fixes: the original iterated over the unbound name ``version_tuple`` and
    stringified the whole argument instead of each component.
    """
    return ".".join(str(v) for v in lowerCAmelCase_)
| 306
|
def snake_case_(a: str, b: str) -> bool:
    """Return True iff string ``a`` can be abbreviated to string ``b``:
    each lowercase letter of ``a`` may be uppercased or deleted; uppercase
    letters of ``a`` must be matched as-is, and the result must equal ``b``.

    Fixes: both parameters were declared with the same mangled name (a
    SyntaxError) and the body read the unbound names ``a``/``b``/``n``/``m``/
    ``dp``; the two DP cell assignments were also bound to throwaway names.
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: the first i chars of `a` can produce the first j chars of `b`.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # uppercase a[i] to match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True  # delete the lowercase a[i]
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 306
| 1
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth–Morris–Pratt substring search: True iff `pattern` occurs in `text`.

    Runs in O(len(text) + len(pattern)) — the text index never moves backwards.

    Fixes: both functions in this script were mangled to the same name
    ``snake_case_`` (the second shadowed the first) and the bodies read
    unbound names (``failure``, ``pattern``, ``i``, ``j``); the script's own
    asserts call ``kmp``/``get_failure_array``, so those names are restored.
    """
    # 1) Construct the failure array for the pattern.
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern.
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # On a mismatch mid-pattern, fall back to the longest border and retry
        # the same text character.
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Return the KMP failure array: failure[k] is the length of the longest
    proper prefix of pattern[: k + 1] that is also a suffix of it."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 306
|
from scipy.stats import spearmanr
import datasets
lowerCamelCase : List[str] = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCamelCase : List[str] = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCamelCase : Union[str, Any] = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
    '''Spearman rank-order correlation metric, backed by scipy.stats.spearmanr.'''
    # NOTE(review): the two methods below were presumably `_info` and `_compute`
    # before name-mangling; datasets.Metric dispatches to those names.

    def lowerCAmelCase(self):
        """Describe the metric: float predictions/references plus the scipy reference URL."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def lowerCAmelCase(self, predictions, references, return_pvalue=False):
        """Compute Spearman's rho for the inputs; optionally include the p-value.

        Fixes: the original declared all three parameters as ``__a`` (duplicate
        argument names are a SyntaxError) and bound scipy's result to a
        throwaway name while reading ``results``.
        """
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 306
| 1
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowerCAmelCase ( __a ):
    '''Output container for the Flax ControlNet: one residual per down block
    plus the mid-block residual (added to the UNet's skip connections).

    NOTE(review): both fields are mangled to `_A` (the second shadows the
    first); originally these were presumably `down_block_res_samples` and
    `mid_block_res_sample`.
    '''
    _A : jnp.ndarray
    _A : jnp.ndarray
class lowerCAmelCase ( nn.Module ):
    '''Embeds the conditioning image into the ControlNet feature space:
    a conv stem, stride-2 downsampling conv pairs, and a zero-initialized
    output projection.

    NOTE(review): fields are mangled to `_A`; from their use in the methods
    they are conditioning_embedding_channels (int), block_out_channels
    (Tuple[int]) and dtype.  Locals are bound to `__lowercase` while later
    lines read `blocks`/`embedding` and call sites pass the undefined `__a` —
    restore the original identifiers before running.
    '''
    _A : int
    _A : Tuple[int] = (16, 32, 96, 256)
    _A : jnp.dtype = jnp.floataa
    def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        """Build conv_in, the intermediate conv/downsample blocks, and conv_out."""
        __lowercase : Any = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        __lowercase : Tuple = []
        for i in range(len(self.block_out_channels ) - 1 ):
            __lowercase : Any = self.block_out_channels[i]
            __lowercase : Union[str, Any] = self.block_out_channels[i + 1]
            # Per step: a 3x3 conv keeping channels, then a stride-2 conv that
            # changes channel count and halves spatial resolution.
            __lowercase : Union[str, Any] = nn.Conv(
                __a , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(__a )
            __lowercase : int = nn.Conv(
                __a , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(__a )
        __lowercase : str = blocks
        # Output projection is zero-initialized (kernel and bias), so the
        # conditioning contribution starts at zero.
        __lowercase : Dict = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self : Union[str, Any] , __a : Optional[int] ) -> Union[str, Any]:
        """Apply conv_in + silu, each intermediate block + silu, then conv_out."""
        __lowercase : Any = self.conv_in(__a )
        __lowercase : int = nn.silu(__a )
        for block in self.blocks:
            __lowercase : Optional[int] = block(__a )
            __lowercase : List[Any] = nn.silu(__a )
        __lowercase : Optional[int] = self.conv_out(__a )
        return embedding
@flax_register_to_config
class lowerCAmelCase ( nn.Module , __a , __a ):
    '''Flax ControlNet model: a UNet-style encoder (down blocks + mid block)
    whose per-resolution residuals condition a diffusion UNet.

    NOTE(review): all hyper-parameter fields are mangled to `_A` (later
    declarations shadow earlier ones) and locals are bound to `__lowercase`
    while call sites pass the undefined `__a`.  From their defaults and use,
    the fields are presumably: sample_size=32, in_channels=4,
    down_block_types, only_cross_attention=False,
    block_out_channels=(320, 640, 1280, 1280), layers_per_block=2,
    attention_head_dim=8, num_attention_heads=None,
    cross_attention_dim=1280, dropout=0.0, use_linear_projection=False,
    dtype, flip_sin_to_cos=True, freq_shift=0,
    controlnet_conditioning_channel_order="rgb",
    conditioning_embedding_out_channels=(16, 32, 96, 256).  Restore the
    original identifiers before running.
    '''
    _A : int = 32
    _A : int = 4
    _A : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    _A : Union[bool, Tuple[bool]] = False
    _A : Tuple[int] = (320, 640, 1280, 1280)
    _A : int = 2
    _A : Union[int, Tuple[int]] = 8
    _A : Optional[Union[int, Tuple[int]]] = None
    _A : int = 1280
    _A : float = 0.0
    _A : bool = False
    _A : jnp.dtype = jnp.floataa
    _A : bool = True
    _A : int = 0
    _A : str = "rgb"
    _A : Tuple[int] = (16, 32, 96, 256)
    def lowerCAmelCase ( self : Any , __a : jax.random.KeyArray ) -> FrozenDict:
        """Initialize parameters with zero dummy inputs (sample, timestep,
        encoder hidden states and a conditioning image at 8x sample_size)."""
        __lowercase : List[Any] = (1, self.in_channels, self.sample_size, self.sample_size)
        __lowercase : int = jnp.zeros(__a , dtype=jnp.floataa )
        __lowercase : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
        __lowercase : Tuple = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        __lowercase : List[str] = (1, 3, self.sample_size * 8, self.sample_size * 8)
        __lowercase : Dict = jnp.zeros(__a , dtype=jnp.floataa )
        __lowercase , __lowercase : List[Any] = jax.random.split(__a )
        __lowercase : Any = {"""params""": params_rng, """dropout""": dropout_rng}
        return self.init(__a , __a , __a , __a , __a )["params"]
    def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
        """Flax setup(): build conv_in, time embedding, conditioning embedding,
        the down blocks with their zero-init ControlNet convs, and the mid block."""
        __lowercase : Tuple = self.block_out_channels
        __lowercase : Optional[Any] = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        __lowercase : List[Any] = self.num_attention_heads or self.attention_head_dim
        # input
        __lowercase : Any = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        __lowercase : Tuple = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        __lowercase : List[Any] = FlaxTimestepEmbedding(__a , dtype=self.dtype )
        __lowercase : List[str] = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        # Broadcast per-block settings given as scalars to one entry per down block.
        __lowercase : Optional[Any] = self.only_cross_attention
        if isinstance(__a , __a ):
            __lowercase : Tuple = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(__a , __a ):
            __lowercase : Optional[Any] = (num_attention_heads,) * len(self.down_block_types )
        # down
        __lowercase : Union[str, Any] = []
        __lowercase : int = []
        __lowercase : List[str] = block_out_channels[0]
        # ControlNet convs are 1x1 and zero-initialized so residuals start at zero.
        __lowercase : List[Any] = nn.Conv(
            __a , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(__a )
        for i, down_block_type in enumerate(self.down_block_types ):
            __lowercase : Dict = output_channel
            __lowercase : Optional[Any] = block_out_channels[i]
            __lowercase : Union[str, Any] = i == len(__a ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                __lowercase : List[Any] = FlaxCrossAttnDownBlockaD(
                    in_channels=__a , out_channels=__a , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                __lowercase : Any = FlaxDownBlockaD(
                    in_channels=__a , out_channels=__a , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(__a )
            # One zero-init 1x1 conv per resnet layer of the down block.
            for _ in range(self.layers_per_block ):
                __lowercase : int = nn.Conv(
                    __a , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(__a )
            # Plus one for the downsampler of every non-final block.
            if not is_final_block:
                __lowercase : List[str] = nn.Conv(
                    __a , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(__a )
        __lowercase : Optional[Any] = down_blocks
        __lowercase : Optional[int] = controlnet_down_blocks
        # mid
        __lowercase : Tuple = block_out_channels[-1]
        __lowercase : Tuple = FlaxUNetMidBlockaDCrossAttn(
            in_channels=__a , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        __lowercase : str = nn.Conv(
            __a , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self : int , __a : Any , __a : Optional[int] , __a : str , __a : Tuple , __a : float = 1.0 , __a : bool = True , __a : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        """Run the ControlNet: embed time and conditioning image, encode through
        the down/mid blocks, apply the zero-init projections, and scale the
        residuals by the conditioning scale.  Returns a FlaxControlNetOutput
        (or a plain tuple when return_dict is falsy)."""
        __lowercase : List[Any] = self.controlnet_conditioning_channel_order
        # Flip RGB <-> BGR channel order of the conditioning image if requested.
        if channel_order == "bgr":
            __lowercase : Union[str, Any] = jnp.flip(__a , axis=1 )
        # 1. time
        if not isinstance(__a , jnp.ndarray ):
            __lowercase : int = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(__a , jnp.ndarray ) and len(timesteps.shape ) == 0:
            __lowercase : str = timesteps.astype(dtype=jnp.floataa )
            __lowercase : Optional[int] = jnp.expand_dims(__a , 0 )
        __lowercase : Union[str, Any] = self.time_proj(__a )
        __lowercase : str = self.time_embedding(__a )
        # 2. pre-process (NCHW -> NHWC for Flax convolutions)
        __lowercase : Any = jnp.transpose(__a , (0, 2, 3, 1) )
        __lowercase : Any = self.conv_in(__a )
        __lowercase : Optional[int] = jnp.transpose(__a , (0, 2, 3, 1) )
        __lowercase : Any = self.controlnet_cond_embedding(__a )
        sample += controlnet_cond
        # 3. down
        __lowercase : int = (sample,)
        for down_block in self.down_blocks:
            if isinstance(__a , __a ):
                __lowercase , __lowercase : Dict = down_block(__a , __a , __a , deterministic=not train )
            else:
                __lowercase , __lowercase : int = down_block(__a , __a , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        __lowercase : Optional[Any] = self.mid_block(__a , __a , __a , deterministic=not train )
        # 5. contronet blocks
        __lowercase : Union[str, Any] = ()
        for down_block_res_sample, controlnet_block in zip(__a , self.controlnet_down_blocks ):
            __lowercase : int = controlnet_block(__a )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        __lowercase : int = controlnet_down_block_res_samples
        __lowercase : Optional[int] = self.controlnet_mid_block(__a )
        # 6. scaling
        __lowercase : List[str] = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=__a , mid_block_res_sample=__a )
| 306
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth–Morris–Pratt substring search: True iff `pattern` occurs in `text`.

    Runs in O(len(text) + len(pattern)) — the text index never moves backwards.

    Fixes: both functions in this script were mangled to the same name
    ``snake_case_`` (the second shadowed the first) and the bodies read
    unbound names (``failure``, ``pattern``, ``i``, ``j``); the script's own
    asserts call ``kmp``/``get_failure_array``, so those names are restored.
    """
    # 1) Construct the failure array for the pattern.
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern.
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # On a mismatch mid-pattern, fall back to the longest border and retry
        # the same text character.
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Return the KMP failure array: failure[k] is the length of the longest
    proper prefix of pattern[: k + 1] that is also a suffix of it."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 306
| 1
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_(params, i, prefix):
    """Return the relative position bias table for block `i`:
    params["{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :].

    Fixes: all three parameters were declared with the same mangled name
    (duplicate argument names are a SyntaxError) while the body read
    ``params``/``prefix``/``i``; the names are restored from that usage.
    """
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def snake_case_(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) attention projection matrices for block `i`,
    each sliced from the stacked T5X layout and flattened to 2-D.

    k/q/v fold the trailing (heads, head_dim) axes together; o folds the
    leading two axes instead, matching its transposed role.

    Fixes: all four parameters were declared with the same mangled name
    (duplicate argument names are a SyntaxError); names restored from the
    body's usage of ``params``/``prefix``/``layer_name``/``i``.
    """
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
def snake_case_(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels for block `i`.

    When split_mlp_wi is True (gated-GeLU v1.1 checkpoints with wi_0/wi_1),
    wi is returned as a (wi_0, wi_1) pair; otherwise it is a single kernel.

    Fixes: all four parameters were declared with the same mangled name
    (duplicate argument names are a SyntaxError); names restored from the
    body's usage.
    """
    if split_mlp_wi:
        wi_a = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_b = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_a, wi_b)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def snake_case_(params, i, prefix, layer_name):
    """Return the layer-norm scale vector for block `i` of the named layer:
    params["{prefix}/{prefix}/{layer_name}/scale"][:, i].

    Fixes: all four parameters were declared with the same mangled name
    (duplicate argument names are a SyntaxError); names restored from the
    body's usage.
    """
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_tax_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Convert the flattened T5X checkpoint parameters into a dict of PyTorch-style
    state-dict keys -> numpy arrays (transposed into PyTorch layout where needed)."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = tax_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = tax_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        # Non-umt5 models share a single relative attention bias on block 0.
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = tax_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = tax_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = tax_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

    # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
    if "decoder/logits_dense/kernel" in old:
        new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T
    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Build a PyTorch state dict from converted numpy parameters, filling in the
    tied embedding / LM-head entries that the checkpoint does not carry explicitly."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention):
    """Load the T5X checkpoint at `tax_checkpoint_path` into `model` in place."""
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_tax_checkpoint_to_pytorch(
    tax_checkpoint_path,
    config_file,
    pytorch_dump_path,
    is_encoder_only: bool = False,
    scalable_attention: bool = False,
):
    """Convert a native T5X checkpoint into a PyTorch checkpoint saved at `pytorch_dump_path`."""
    config = MTaConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMTaEncoderModel(config)
    else:
        model = UMTaForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    # argparse stores `--t5x_checkpoint_path` as `args.t5x_checkpoint_path`.
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 306
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger, named after the module per transformers convention.
logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL
class lowerCAmelCase(BaseImageProcessor):
    r"""
    CLIP-style image processor: optional RGB conversion, then resize (shortest edge),
    center crop, rescale and normalize.

    Args:
        do_resize: Whether to resize the shortest edge to `size["shortest_edge"]`.
        size: Target size; defaults to `{"shortest_edge": 224}`.
        resample: Resampling filter used for resizing.
        do_center_crop: Whether to center-crop to `crop_size`.
        crop_size: Crop size; defaults to `{"height": 224, "width": 224}`.
        do_rescale: Whether to multiply pixel values by `rescale_factor`.
        rescale_factor: Rescale factor, typically `1/255`.
        do_normalize: Whether to normalize with `image_mean` / `image_std`.
        image_mean: Per-channel mean; defaults to `OPENAI_CLIP_MEAN`.
        image_std: Per-channel std; defaults to `OPENAI_CLIP_STD`.
        do_convert_rgb: Whether to convert inputs to RGB first.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals `size["shortest_edge"]`, preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale`."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline on one image or a batch; per-call args override the defaults
        configured in `__init__`. Returns a `BatchFeature` with `pixel_values`."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 306
| 1
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[str] ):
return line.startswith(lowerCAmelCase_ ) or len(lowerCAmelCase_ ) <= 1 or re.search(r"""^\s*\)(\s*->.*:|:)\s*$""" , lowerCAmelCase_ ) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name` inside the diffusers repo."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
lowerCamelCase : int = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowerCamelCase : Any = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowerCamelCase : Any = re.compile(r'''<FILL\s+[^>]*>''')
def get_indent(code):
    """Return the indentation string of the first non-empty line of `code` ("" if none)."""
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    """Format `code` with black; indented (method-level) snippets are temporarily
    wrapped in a dummy class so black accepts them."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """Check that every "# Copied from" block in `filename` matches its original.

    Returns a list of `[object_name, start_index]` differences; when `overwrite`
    is True the file is rewritten with the theoretical code instead.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    """Check (or fix, when `overwrite`) all "Copied from" blocks in the repo's .py files."""
    all_files = glob.glob(os.path.join(REPO_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
| 306
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    """Sum all parameter values, skipping `encoder.embeddings` keys.

    Those embeddings are double copied in the original FLAVA checkpoint, so counting
    them would break the parity check against the converted model.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    """Rename the dall_e Encoder's state-dict keys to the HF FlavaImageCodebook layout
    and cast all values to float32."""
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Convert a dall_e Encoder checkpoint to a FlavaImageCodebook and save (or return) it."""
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    # Sanity check: total parameter mass must survive the key renaming.
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 306
| 1
|
import heapq
import sys
import numpy as np
# Grid position type used throughout the A* implementation.
TPos = tuple[int, int]
class PriorityQueue:
    """Min-heap priority queue with membership tracking, priority update and removal."""

    def __init__(self):
        self.elements = []  # heap of (priority, item)
        self.set = set()  # items currently in the queue

    def minkey(self):
        """Smallest priority currently queued, or +inf when empty."""
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        """Insert `item`, or update its priority if already queued."""
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update: pop entries until we find `item`, re-push it with the new
            # priority, then restore everything we popped.
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for pro, xxx in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        """Remove `item` from the queue if present."""
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for prito, yyy in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        """Item with the smallest priority (without removing it)."""
        return self.elements[0][1]

    def get(self):
        """Pop and return the (priority, item) pair with the smallest priority."""
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
def snake_case_ ( lowerCAmelCase_ : TPos , lowerCAmelCase_ : TPos ):
# euclidean distance
__lowercase : Union[str, Any] = np.array(lowerCAmelCase_ )
__lowercase : int = np.array(lowerCAmelCase_ )
return np.linalg.norm(a - b )
def heuristic_1(p: TPos, goal: TPos):
    """Euclidean distance floor-divided by the global time variable `t` (inconsistent)."""
    return consistent_heuristic(p, goal) // t
def snake_case_ ( lowerCAmelCase_ : TPos , lowerCAmelCase_ : TPos ):
# manhattan distance
return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] )
def key(start: TPos, i: int, goal: TPos, g_function: dict):
    """Priority of node `start` for queue `i`: g-cost plus W1-weighted heuristic `i`."""
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
def do_something(back_pointer, goal, start):
    """Print the grid with obstacles and the found path, print the path itself, and exit."""
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        # Grid rows are printed top-down, so flip the y coordinate.
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    """True when point `p` lies inside the n x n grid."""
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(
    s,
    j,
    visited,
    g_function,
    close_list_anchor,
    close_list_inad,
    open_list,
    back_pointer,
):
    """Expand node `s` from queue `j`: relax its 4-neighbours and push them
    onto the anchor and inadmissible open lists."""
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

            if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                g_function[neighbours] = g_function[s] + 1
                back_pointer[neighbours] = s
                if neighbours not in close_list_anchor:
                    open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                if neighbours not in close_list_inad:
                    for var in range(1, n_heuristic):
                        if key(neighbours, var, goal, g_function) <= W2 * key(
                            neighbours, 0, goal, g_function
                        ):
                            open_list[j].put(
                                neighbours, key(neighbours, var, goal, g_function)
                            )
def make_common_ground():
    """Build the list of obstacle cells for the demo map (L-shaped walls and bars)."""
    some_list = []
    for x in range(1, 5):
        for y in range(1, 6):
            some_list.append((x, y))

    for x in range(15, 20):
        some_list.append((x, 17))

    for x in range(10, 19):
        for y in range(1, 15):
            some_list.append((x, y))

    # L block
    for x in range(1, 4):
        for y in range(12, 19):
            some_list.append((x, y))
    for x in range(3, 13):
        for y in range(16, 19):
            some_list.append((x, y))
    return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}

# Vertical wall at y == 1 across the whole grid width.
blocks_blk = [(x, 1) for x in range(20)]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent

# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start: TPos, goal: TPos, n_heuristic: int):
    """Shared multi-heuristic A* (SMHA*) from `start` to `goal`.

    Maintains one "anchor" queue driven by the consistent heuristic plus
    ``n_heuristic - 1`` queues driven by the inadmissible heuristics; an
    inadmissible queue is expanded while its min key stays within ``Wa`` times
    the anchor's min key. When the anchor queue is exhausted without reaching
    the goal, the grid is printed with obstacles and visited back-pointers.

    NOTE(review): reconstructed from name-mangled source; relies on the
    module-level ``PriorityQueue``, ``key``, ``do_something``, ``expand_state``,
    ``Wa``, ``t``, ``n`` and ``blocks`` defined earlier in this file.
    """
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor: list[int] = []
    close_list_inad: list[int] = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= Wa * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    # NOTE(review): the original unpacks two values from
                    # top_show() here but takes a single value in the anchor
                    # branch below; preserved as-is.
                    _, get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    # Render the grid top-to-bottom: '#' obstacle, '-' expanded cell, '*' other.
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
    # Run shared multi-heuristic A* on the module-level grid configuration.
    multi_a_star(start, goal, n_heuristic)
| 306
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Module logger; the conversion loop below logs every initialized weight.
# BUGFIX: the assignment target was mangled, leaving `logger` (read later in
# the conversion function) undefined.
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy weights from an old-structure (XLM)ProphetNet checkpoint into the
    current transformers layout and save the converted model.

    Args:
        prophetnet_checkpoint_path: path of the official (old) PyTorch dump;
            "xprophetnet" in the path selects the XLM variant.
        pytorch_dump_folder_path: output folder for the converted model.

    Raises:
        ValueError: if a missing key cannot be matched to an old-model weight.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections stored packed in a single in_proj tensor in the old
    # model; they must be split into q/k/v slices below.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        # Walk the dotted key, descending both models in lockstep until a leaf
        # weight can be copied.
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # BUGFIX: these two shape checks were bare tuple expressions
                # (no-ops); they are meant to be assertions.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the two required paths and run the conversion.
    # BUGFIX: the parser/args assignment targets were mangled, leaving
    # `parser` and `args` (read below) undefined.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
| 1
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate the logical OR of two binary inputs (0 or 1).

    BUGFIX: both parameters were mangled to the same name (a SyntaxError) and
    the body read undefined identifiers; the name is restored to match the
    call sites below.

    >>> or_gate(0, 0)
    0
    >>> or_gate(1, 0)
    1
    """
    # OR is true exactly when at least one of the two inputs is 1.
    return int((input_1, input_2).count(1) != 0)
def snake_case_():
    """Exhaustively verify the truth table of ``or_gate``."""
    truth_table = ((0, 0, 0), (0, 1, 1), (1, 0, 1), (1, 1, 1))
    for lhs, rhs, expected in truth_table:
        assert or_gate(lhs, rhs) == expected
if __name__ == "__main__":
    # Demo: print one OR result per line (expected output: 1, 1, 0, 1).
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 306
|
def solution(pence: int = 200) -> int:
    """Return how many ways `pence` can be made from standard British coins
    (Project Euler problem 31), via bottom-up dynamic programming.

    BUGFIX: the `coins`/`number_of_ways` assignment targets were mangled,
    leaving both names undefined; the function name is restored to match
    the `solution(...)` call below.

    >>> solution(200)
    73682
    """
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        # Each coin extends every amount it fits into.
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    # Project Euler 31: 73682 ways to make £2 from standard British coins.
    assert solution(2_00) == 7_36_82
| 306
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Make torch/cuDNN ops deterministic so pipeline outputs are reproducible across runs.
enable_full_determinism()
class lowerCAmelCase ( unittest.TestCase ):
    '''Fast CPU tests for StableDiffusionUpscalePipeline built from tiny seeded dummy components.

    NOTE(review): local assignment targets throughout this class were mangled to
    ``__lowercase`` while later statements read the original names (batch_size,
    sd_pipe, low_res_image, ...); likewise all methods share the mangled name
    ``lowerCAmelCase`` and the placeholder ``__a``. The original bindings must
    be restored before these tests can execute.
    '''
    def lowerCAmelCase ( self : Any ) -> int:
        """Release Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
        """Deterministic random 1x3x32x32 float image tensor (seed 0)."""
        __lowercase : List[Any] = 1
        __lowercase : int = 3
        __lowercase : Any = (32, 32)
        __lowercase : Optional[int] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__a )
        return image
    @property
    def lowerCAmelCase ( self : Optional[Any] ) -> str:
        """Tiny seeded UNet2DConditionModel sized for the upscale pipeline (7-in/4-out channels)."""
        torch.manual_seed(0 )
        __lowercase : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=__a , only_cross_attention=(True, True, False) , num_class_embeds=100 , )
        return model
    @property
    def lowerCAmelCase ( self : Optional[Any] ) -> int:
        """Tiny seeded AutoencoderKL with 4 latent channels."""
        torch.manual_seed(0 )
        __lowercase : Optional[int] = AutoencoderKL(
            block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
        return model
    @property
    def lowerCAmelCase ( self : List[Any] ) -> int:
        """Tiny seeded CLIP text encoder matching the dummy UNet's cross-attention width (32)."""
        torch.manual_seed(0 )
        __lowercase : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
        return CLIPTextModel(__a )
    def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """Smoke test: upscaling a 64x64 input 4x yields the expected shape and the
        corner slice matches recorded reference values, for both dict and tuple returns."""
        __lowercase : Optional[int] = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __lowercase : List[str] = self.dummy_cond_unet_upscale
        __lowercase : Optional[int] = DDPMScheduler()
        __lowercase : str = DDIMScheduler(prediction_type="""v_prediction""" )
        __lowercase : Optional[Any] = self.dummy_vae
        __lowercase : Optional[int] = self.dummy_text_encoder
        __lowercase : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        __lowercase : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowercase : Optional[Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        __lowercase : Any = StableDiffusionUpscalePipeline(
            unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
        __lowercase : Any = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        __lowercase : List[str] = """A painting of a squirrel eating a burger"""
        __lowercase : str = torch.Generator(device=__a ).manual_seed(0 )
        __lowercase : Optional[Any] = sd_pipe(
            [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
        __lowercase : Dict = output.images
        __lowercase : List[str] = torch.Generator(device=__a ).manual_seed(0 )
        __lowercase : Union[str, Any] = sd_pipe(
            [prompt] , image=__a , generator=__a , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=__a , )[0]
        __lowercase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowercase : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
        # Output is upscaled 4x from the 64x64 input.
        __lowercase : Tuple = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        __lowercase : List[Any] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
        """Batching: two prompts yield two images; num_images_per_prompt=2 also yields two."""
        __lowercase : str = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        __lowercase : Any = self.dummy_cond_unet_upscale
        __lowercase : Dict = DDPMScheduler()
        __lowercase : str = DDIMScheduler(prediction_type="""v_prediction""" )
        __lowercase : Union[str, Any] = self.dummy_vae
        __lowercase : Union[str, Any] = self.dummy_text_encoder
        __lowercase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        __lowercase : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowercase : Optional[Any] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        __lowercase : str = StableDiffusionUpscalePipeline(
            unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
        __lowercase : List[Any] = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        __lowercase : Union[str, Any] = """A painting of a squirrel eating a burger"""
        __lowercase : List[Any] = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
        __lowercase : Optional[int] = output.images
        assert image.shape[0] == 2
        __lowercase : List[Any] = torch.Generator(device=__a ).manual_seed(0 )
        __lowercase : Any = sd_pipe(
            [prompt] , image=__a , generator=__a , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
        __lowercase : Optional[Any] = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def lowerCAmelCase ( self : int ) -> str:
        """fp16 variant: UNet and text encoder in half precision (VAE stays fp32, as it
        overflows in fp16) still produce the expected 4x-upscaled output shape."""
        __lowercase : List[Any] = self.dummy_cond_unet_upscale
        __lowercase : str = DDPMScheduler()
        __lowercase : Optional[Any] = DDIMScheduler(prediction_type="""v_prediction""" )
        __lowercase : Any = self.dummy_vae
        __lowercase : Dict = self.dummy_text_encoder
        __lowercase : List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        __lowercase : Optional[int] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __lowercase : Tuple = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        __lowercase : Dict = unet.half()
        __lowercase : Optional[Any] = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        __lowercase : List[str] = StableDiffusionUpscalePipeline(
            unet=__a , low_res_scheduler=__a , scheduler=__a , vae=__a , text_encoder=__a , tokenizer=__a , max_noise_level=350 , )
        __lowercase : Optional[Any] = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        __lowercase : Dict = """A painting of a squirrel eating a burger"""
        __lowercase : List[str] = torch.manual_seed(0 )
        __lowercase : str = sd_pipe(
            [prompt] , image=__a , generator=__a , num_inference_steps=2 , output_type="""np""" , ).images
        __lowercase : Union[str, Any] = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
    '''Slow GPU integration tests for the stabilityai/stable-diffusion-x4-upscaler checkpoint.

    NOTE(review): as in the fast tests above, local assignment targets were
    mangled to ``__lowercase`` while reads use the original names (pipe,
    expected_image, mem_bytes, ...) and ``__a`` stands in for several distinct
    values; the original bindings must be restored before running.
    '''
    def lowerCAmelCase ( self : int ) -> Tuple:
        """Release Python and CUDA memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCAmelCase ( self : int ) -> str:
        """fp32 pipeline reproduces the recorded upsampled cat image within 1e-3."""
        __lowercase : List[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        __lowercase : Any = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
            """/upsampled_cat.npy""" )
        __lowercase : List[Any] = """stabilityai/stable-diffusion-x4-upscaler"""
        __lowercase : Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        __lowercase : Any = """a cat sitting on a park bench"""
        __lowercase : int = torch.manual_seed(0 )
        __lowercase : Any = pipe(
            prompt=__a , image=__a , generator=__a , output_type="""np""" , )
        __lowercase : Any = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-3
    def lowerCAmelCase ( self : Any ) -> Tuple:
        """fp16 pipeline reproduces the recorded fp16 reference within the looser 5e-1 tolerance."""
        __lowercase : Dict = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        __lowercase : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
            """/upsampled_cat_fp16.npy""" )
        __lowercase : Optional[int] = """stabilityai/stable-diffusion-x4-upscaler"""
        __lowercase : int = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing()
        __lowercase : Optional[Any] = """a cat sitting on a park bench"""
        __lowercase : int = torch.manual_seed(0 )
        __lowercase : Tuple = pipe(
            prompt=__a , image=__a , generator=__a , output_type="""np""" , )
        __lowercase : str = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
    def lowerCAmelCase ( self : Any ) -> Optional[int]:
        """With attention slicing (slice size 1) and sequential CPU offload, peak GPU
        memory for a 5-step run stays under 2.9 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __lowercase : Optional[int] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/sd2-upscale/low_res_cat.png""" )
        __lowercase : Union[str, Any] = """stabilityai/stable-diffusion-x4-upscaler"""
        __lowercase : Any = StableDiffusionUpscalePipeline.from_pretrained(
            __a , torch_dtype=torch.floataa , )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        __lowercase : str = """a cat sitting on a park bench"""
        __lowercase : Tuple = torch.manual_seed(0 )
        __lowercase : int = pipe(
            prompt=__a , image=__a , generator=__a , num_inference_steps=5 , output_type="""np""" , )
        __lowercase : Union[str, Any] = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 306
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
    '''Config/input factory for TimmBackbone tests (the usual ModelTester pattern).

    NOTE(review): ``__init__`` parameters were all mangled to ``__a`` and the
    instance-attribute assignment targets to ``__lowercase``, while the other
    methods read the original names (``self.batch_size``, ``self.backbone``,
    ...); the original bindings must be restored before use.
    '''
    def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any:
        """Store the test hyperparameters (backbone name, image size, channels, flags)."""
        __lowercase : Optional[int] = parent
        __lowercase : List[str] = out_indices if out_indices is not None else [4]
        __lowercase : Optional[int] = stage_names
        __lowercase : Any = out_features
        __lowercase : Optional[Any] = backbone
        __lowercase : Optional[Any] = batch_size
        __lowercase : Union[str, Any] = image_size
        __lowercase : List[str] = num_channels
        __lowercase : str = use_pretrained_backbone
        __lowercase : str = is_training
    def lowerCAmelCase ( self : Dict ) -> Tuple:
        """Return (config, pixel_values) for a single forward pass."""
        __lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowercase : str = self.get_config()
        return config, pixel_values
    def lowerCAmelCase ( self : int ) -> str:
        """Build a TimmBackboneConfig from the stored hyperparameters."""
        return TimmBackboneConfig(
            image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict:
        """Run the backbone in eval mode and check the last feature map's shape."""
        __lowercase : Dict = TimmBackbone(config=__a )
        model.to(__a )
        model.eval()
        with torch.no_grad():
            __lowercase : Optional[Any] = model(__a )
        self.parent.assertEqual(
            result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
    def lowerCAmelCase ( self : Any ) -> int:
        """Return (config, inputs_dict) in the format the common test mixins expect."""
        __lowercase : Union[str, Any] = self.prepare_config_and_inputs()
        __lowercase , __lowercase : str = config_and_inputs
        __lowercase : List[str] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
    '''Common-suite tests for TimmBackbone, with timm-specific checks and many
    intentionally skipped common tests that don't apply to timm-backed models.

    NOTE(review): the base-class names in the class header and most local
    assignment targets were mangled (``__a`` / ``__lowercase``) while reads use
    the original names (``timm_model``, ``transformers_model``, ``model``, ...);
    restore the original bindings before running.
    '''
    _A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
    _A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
    _A : List[Any] = False
    _A : List[str] = False
    _A : Any = False
    _A : Optional[Any] = False
    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Set up the model tester and a config tester (no text modality)."""
        __lowercase : str = TimmBackboneModelTester(self )
        __lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
    def lowerCAmelCase ( self : Any ) -> str:
        """Exercise the standard ConfigTester serialization and init checks."""
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def lowerCAmelCase ( self : str ) -> Tuple:
        """Timm-loaded and transformers-loaded resnet18 backbones must expose
        equivalent features, stage names, channels, and out_indices."""
        __lowercase : Tuple = """resnet18"""
        __lowercase : Optional[int] = """microsoft/resnet-18"""
        __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
        __lowercase : Dict = AutoBackbone.from_pretrained(__a )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices , (-1,) )
        self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        __lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
        __lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
        self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
        self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
        self.assertEqual(timm_model.channels , transformers_model.channels )
    @unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
    def lowerCAmelCase ( self : List[Any] ) -> Any:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
    def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
    def lowerCAmelCase ( self : List[Any] ) -> str:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
    def lowerCAmelCase ( self : Tuple ) -> Tuple:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
    def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : Union[str, Any] ) -> int:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""model weights aren't tied in TimmBackbone.""" )
    def lowerCAmelCase ( self : Dict ) -> int:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : List[str] ) -> List[Any]:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
    def lowerCAmelCase ( self : List[Any] ) -> Tuple:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
    def lowerCAmelCase ( self : Dict ) -> Any:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
    def lowerCAmelCase ( self : str ) -> List[Any]:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""Safetensors is not supported by timm.""" )
    def lowerCAmelCase ( self : Any ) -> List[Any]:
        """Skipped: not applicable to TimmBackbone (see decorator)."""
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCAmelCase ( self : List[str] ) -> List[str]:
        """Skipped: temporarily disabled (see decorator)."""
        pass
    def lowerCAmelCase ( self : Any ) -> List[str]:
        """forward() must accept ``pixel_values`` as its first argument."""
        __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : Optional[Any] = model_class(__a )
            __lowercase : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase : List[str] = [*signature.parameters.keys()]
            __lowercase : str = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __a )
    def lowerCAmelCase ( self : Optional[Any] ) -> int:
        """Gradients must flow back to hidden states (and attentions when present)."""
        __lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase : Optional[Any] = True
        __lowercase : Union[str, Any] = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        __lowercase : Union[str, Any] = self.all_model_classes[0]
        __lowercase : List[Any] = model_class(__a )
        model.to(__a )
        __lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
        __lowercase : Union[str, Any] = model(**__a )
        __lowercase : Optional[int] = outputs[0][-1]
        # Encoder-/Decoder-only models
        __lowercase : Any = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            __lowercase : Optional[int] = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=__a )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
        """Feature maps/channels must follow out_indices, default to the last stage
        when unset, and the backbone must initialize with fresh weights when
        use_pretrained_backbone is False."""
        __lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase : List[str] = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : int = model(**__a )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            __lowercase : Any = copy.deepcopy(__a )
            __lowercase : Dict = None
            __lowercase : Tuple = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : Optional[int] = model(**__a )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            __lowercase : List[str] = copy.deepcopy(__a )
            __lowercase : Optional[Any] = False
            __lowercase : str = model_class(__a )
            model.to(__a )
            model.eval()
            __lowercase : List[Any] = model(**__a )
| 306
| 1
|
import os
import pytest
from transformers.dynamic_module_utils import get_imports
# Source snippets fed to get_imports(); every one must parse to exactly ["os"]
# because the try/except-guarded `bar`/`baz` imports are meant to be ignored.
# BUGFIX: all ten constants were assigned to the mangled name `lowerCamelCase`
# while the CASES list below reads the original names — a NameError at import.
# NOTE(review): names were reconstructed from each snippet's structure — confirm
# the assignment of names to snippets against the upstream test file.
TOP_LEVEL_IMPORT = '''
import os
'''

IMPORT_IN_FUNCTION = '''
def foo():
    import os
    return False
'''

DEEPLY_NESTED_IMPORT = '''
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
'''

TOP_LEVEL_TRY_IMPORT = '''
import os

try:
    import bar
except ImportError:
    raise ValueError()
'''

TRY_IMPORT_IN_FUNCTION = '''
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
'''

MULTIPLE_EXCEPTS_IMPORT = '''
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
'''

EXCEPT_AS_IMPORT = '''
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
'''

GENERIC_EXCEPT_IMPORT = '''
import os

try:
    import bar
except:
    raise ValueError()
'''

MULTILINE_TRY_IMPORT = '''
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
'''

MULTILINE_BOTH_IMPORT = '''
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
'''

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    """Write `case` to a temp file and check get_imports() reports exactly ["os"].

    The `bar`/`baz` imports inside the snippets are try/except-guarded and must
    be ignored by the parser.

    BUGFIX: the parametrize decorator referenced the undefined mangled name
    `lowerCAmelCase_` (should be CASES) and the local assignment targets were
    mangled; the `test_` prefix is restored so pytest collects the function.
    """
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
| 306
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger used by all conversion helpers below.
logger = logging.get_logger(__name__)

# Maps fairseq parameter-name fragments to their Hugging Face counterparts.
# A "*" placeholder is replaced with the encoder-layer index at load time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# Keys that live at the top level of the HF model, i.e. are NOT prefixed with
# "wav2vec2_conformer." when mapped (see recursively_load_weights).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(key, value, full_name, weight_type, hf_model):
    """Copy `value` into the parameter of `hf_model` addressed by the dotted `key`.

    Args:
        key: dotted attribute path inside `hf_model` (e.g. "encoder.layers.0.ffn1.output_dense").
        value: tensor holding the fairseq weight to copy.
        full_name: original fairseq parameter name, used only for logging/errors.
        weight_type: which leaf attribute to write ("weight", "bias", "running_mean", ...)
            or None to write the pointed-to object's `.data` directly.
        hf_model: the Hugging Face model being populated.

    Raises:
        ValueError: if the destination shape does not match `value.shape`.
    """
    # Walk down the attribute path to the target submodule/parameter.
    hf_pointer = hf_model
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every weight in `fairseq_model`'s state dict into `hf_model`.

    Conv feature-extractor weights are routed through `load_conv_layer`; all
    other weights are renamed via the module-level MAPPING table and written
    with `set_recursively`. Weights that match nothing are collected and logged.

    Note: `is_headless` is accepted for signature compatibility with the
    caller but is not consulted here.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # Top-level keys (lm_head, quantizer, ...) are not nested under the backbone.
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder-layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(mapped_key, value, name, weight_type, hf_model)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor weight into `feature_extractor`.

    The fairseq name encodes `<layer_id>.<type_id>.<leaf>`: type 0 is the conv
    itself; type 2 is a norm layer (only loaded when it exists, i.e. layer-norm
    extractors, or the group-norm on layer 0). Anything else is recorded in
    `unused_weights`.

    Raises:
        ValueError: if the source and destination tensor shapes disagree.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak a fairseq Wav2Vec2-Conformer checkpoint into the
    Transformers design and save it to `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        # NOTE(review): the original block set an (obfuscated) config field for
        # rotary checkpoints; position_embeddings_type is the matching field.
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            # Layer-norm feature extractors need the attention mask at inference.
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = WavaVecaConformerForCTC(config)
    else:
        hf_wav2vec = WavaVecaConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: fixes the undefined `parser`/`args` names the original
    # block produced by assigning both to an unrelated variable.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 306
| 1
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    """Builds small ViT-hybrid configs and inputs for the unit tests below.

    Renamed from the obfuscated class name so the `ViTHybridModelTester(self)`
    call in the test class resolves; `__init__` now stores its arguments on
    `self` instead of discarding them in locals.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViT hybrid.

    Restored the mixin base classes (the original listed an undefined name
    twice) and gave each class attribute its own name instead of rebinding a
    single one; method names now follow the pytest/unittest `test_` convention.
    """

    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the integration tests.

    Renamed from the obfuscated definition so the `prepare_img()` calls in the
    integration test class resolve.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against released ViT-hybrid checkpoints.

    Fixes: `config.idalabel` -> `config.id2label`, and the final check used
    `assertTrue(a, b)` — which treats `b` as the failure message and never
    compares — replaced with `assertEqual(a, b)`.
    """

    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 306
|
def snake_case_(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    The original bound BOTH loop targets to the same name, so the comparison
    `x != x` was always False and the function returned 0 for every input
    (its parameters were also duplicated, a SyntaxError).

    >>> snake_case_("python", "python")
    0
    >>> snake_case_("karolin", "kathrin")
    3

    Raises:
        ValueError: if the two strings differ in length.
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    return sum(char1 != char2 for char1, char2 in zip(string1, string2))
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 306
| 1
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """Builds small TimmBackbone configs/inputs for the tests below.

    Renamed from the obfuscated class name so `TimmBackboneModelTester(self)`
    in the test class resolves; `__init__` stores its arguments on `self`.
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Default to the last stage only, mirroring the library default.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common/backbone tests for TimmBackbone.

    Restored the mixin base classes (the original listed an undefined name
    three times) and gave each class attribute its own name; method names now
    follow the `test_` convention so they are collected.
    """

    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PretrainedConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)

    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)

    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 306
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case_ ( lowerCAmelCase_ : Tuple ):
if isinstance(lowerCAmelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class lowerCAmelCase :
    """Shared test mixin for Flax VisionTextDualEncoder model combinations:
    construction from configs, construction from pretrained towers, save/load
    round trips, attention-output shapes, and PT<->Flax weight equivalence.

    NOTE(review): mechanical renaming has damaged this class -- every method is
    named ``lowerCAmelCase`` (later defs shadow earlier ones), several signatures
    repeat the parameter name ``__a`` (a SyntaxError in Python), and bodies read
    names (``vision_model``, ``config``, ``output`` ...) that the renamed
    assignments to ``__lowercase`` no longer bind. The internal call sites
    (``self.get_vision_text_model``, ``self.prepare_config_and_inputs``, ...)
    still use the original method names; confirm against the upstream
    ``VisionTextDualEncoderMixin``.
    """

    def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
        """Hook: subclasses build (vision_model, text_model) from the two configs."""
        pass

    def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Hook: subclasses return a pretrained dual-encoder model plus inputs."""
        pass

    def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
        """Hook: subclasses return the config/inputs dict driving the checks below."""
        pass

    def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]:
        """Assert two arrays agree element-wise within ``tol`` (max absolute diff)."""
        __lowercase : List[str] = np.abs((a - b) ).max()
        self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." )

    def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
        """Build the model from a merged vision+text config and check that both
        embedding outputs have shape (batch, projection_dim)."""
        __lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : str = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )

    def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
        """Build the model from separately instantiated towers via
        ``from_vision_text_pretrained`` and check the same embedding shapes."""
        __lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
        __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
        """Save the model to a temp dir, reload it, and check the reloaded model
        reproduces the original output within 1e-3."""
        __lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
        __lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        __lowercase : int = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(__a )
            __lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
            __lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
            __lowercase : int = after_output[0]
            # NOTE(review): both the before- and after-reload outputs were renamed to
            # ``out_a``, so ``out_a - out_a`` is identically zero and this assertion
            # can never fail -- the two operands should be distinct variables.
            __lowercase : int = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(__a , 1E-3 )

    def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
        """Run with ``output_attentions`` and verify attention tensor shapes for
        both towers: (heads, seq, seq) with ViT seq_len = num_patches + 1."""
        __lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
        __lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
        __lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
        __lowercase : Union[str, Any] = model(
            input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
        __lowercase : Optional[int] = output.vision_model_output.attentions
        self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        __lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
        __lowercase : List[str] = to_atuple(vision_model.config.patch_size )
        __lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __lowercase : int = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        __lowercase : Dict = output.text_model_output.attentions
        self.assertEqual(len(__a ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )

    def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
        """Compare a PyTorch and a Flax model on the same inputs (tolerance 4e-2),
        then round-trip the weights each way through save_pretrained and recheck."""
        pt_model.to(__a )
        pt_model.eval()
        # prepare inputs
        __lowercase : Union[str, Any] = inputs_dict
        __lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            __lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
        __lowercase : Tuple = fx_model(**__a ).to_tuple()
        self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
        # Only the first four outputs (embeds/logits) are numerically compared.
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(__a )
            __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
        __lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
        self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(__a )
            __lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
        pt_model_loaded.to(__a )
        pt_model_loaded.eval()
        with torch.no_grad():
            __lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
        self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )

    def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
        """PT -> Flax direction: convert the PyTorch state dict into Flax params
        and delegate the numeric comparison to check_pt_flax_equivalence."""
        __lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : str = VisionTextDualEncoderModel(__a )
        __lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
        __lowercase : Any = fx_state
        self.check_pt_flax_equivalence(__a , __a , __a )

    def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
        """Flax -> PT direction: load Flax params into the PyTorch model and
        delegate the numeric comparison to check_pt_flax_equivalence."""
        __lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
        __lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
        __lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
        __lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
        self.check_pt_flax_equivalence(__a , __a , __a )

    def lowerCAmelCase ( self : str ) -> Optional[Any]:
        """Test entry point: model built from configs produces projection outputs."""
        __lowercase : Optional[Any] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**__a )

    def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        """Test entry point: model built from pretrained towers works."""
        __lowercase : int = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**__a )

    def lowerCAmelCase ( self : List[Any] ) -> Dict:
        """Test entry point: save/reload round trip preserves outputs."""
        __lowercase : List[str] = self.prepare_config_and_inputs()
        self.check_save_load(**__a )

    def lowerCAmelCase ( self : Any ) -> Dict:
        """Test entry point: attention outputs have the expected shapes."""
        __lowercase : str = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**__a )

    @is_pt_flax_cross_test
    def lowerCAmelCase ( self : List[str] ) -> Tuple:
        """Cross-framework test: split configs out of the prepared dict and run
        both PT->Flax and Flax->PT equivalence checks."""
        __lowercase : Optional[Any] = self.prepare_config_and_inputs()
        __lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
        __lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
        __lowercase : Dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(__a , __a , __a )
        self.check_equivalence_flax_to_pt(__a , __a , __a )

    @slow
    def lowerCAmelCase ( self : Union[str, Any] ) -> str:
        """Slow test: save a real pretrained dual encoder, reload it, and check
        the reloaded output matches within 1e-5.

        NOTE(review): as in check_save_load, both outputs were renamed to
        ``out_a``, so the computed diff is identically zero.
        """
        __lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
        __lowercase : Dict = model_a(**__a )
        __lowercase : Any = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(__a )
            __lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
            __lowercase : Optional[int] = model_a(**__a )
            __lowercase : Tuple = after_outputs[0]
            __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(__a , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
    """Concrete ViT + BERT instantiation of the dual-encoder test mixin.

    NOTE(review): the base class ``__a`` is not defined at module scope --
    presumably it should be the mixin class defined directly above; confirm
    against the upstream test module.
    """

    def lowerCAmelCase ( self : Dict ) -> Dict:
        """Build a tiny pretrained ViT/BERT dual encoder plus random inputs
        (batch of 13 images, 4-token text) for the slow save/load test."""
        __lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
        __lowercase : int = 13
        __lowercase : Union[str, Any] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        __lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        __lowercase : Tuple = random_attention_mask([batch_size, 4] )
        __lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs

    def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict:
        """Instantiate the Flax ViT vision tower and Flax BERT text tower."""
        __lowercase : int = FlaxViTModel(__a )
        __lowercase : List[Any] = FlaxBertModel(__a )
        return vision_model, text_model

    def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
        """Assemble configs and inputs from the ViT and BERT model testers."""
        __lowercase : Tuple = FlaxViTModelTester(self )
        __lowercase : str = FlaxBertModelTester(self )
        __lowercase : List[str] = vit_model_tester.prepare_config_and_inputs()
        __lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
        __lowercase , __lowercase : Optional[int] = vision_config_and_inputs
        __lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
    """Concrete CLIP-vision + BERT instantiation of the dual-encoder test mixin.

    NOTE(review): the base class ``__a`` is not defined at module scope --
    presumably the mixin class above; confirm against the upstream test module.
    """

    def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Build a tiny pretrained CLIP/BERT dual encoder plus random inputs
        (batch of 13 images, 4-token text) for the slow save/load test."""
        __lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
        __lowercase : Tuple = 13
        __lowercase : Optional[Any] = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        __lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        __lowercase : List[Any] = random_attention_mask([batch_size, 4] )
        __lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs

    def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any:
        """Instantiate the Flax CLIP vision tower and Flax BERT text tower."""
        __lowercase : Dict = FlaxCLIPVisionModel(__a )
        __lowercase : Optional[Any] = FlaxBertModel(__a )
        return vision_model, text_model

    def lowerCAmelCase ( self : List[Any] ) -> List[str]:
        """Assemble configs and inputs from the CLIP and BERT model testers."""
        __lowercase : List[Any] = FlaxCLIPVisionModelTester(self )
        __lowercase : Optional[Any] = FlaxBertModelTester(self )
        __lowercase : Any = clip_model_tester.prepare_config_and_inputs()
        __lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
        __lowercase , __lowercase : Dict = vision_config_and_inputs
        __lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test against the pretrained ``clip-italian/clip-italian``
    dual encoder: run one COCO image with two Italian captions through the
    processor + model and check logits shapes plus hard-coded expected values."""

    @slow
    def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        """End-to-end forward pass; requires network access (from_pretrained), the
        PIL test fixture image, and Flax. ``images=__a`` / ``padding=__a`` reference
        an unbound name -- presumably the image and ``True`` were renamed away;
        confirm upstream."""
        __lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
        __lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        __lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        __lowercase : Tuple = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , padding=__a , return_tensors="""np""" )
        __lowercase : Optional[int] = model(**__a )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        # Reference logits recorded from a known-good run of this checkpoint.
        __lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
| 306
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.