import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize


def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier (linear or RBF kernel)."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklearn: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            msg = f"Unknown kernel: {kernel}"
            raise ValueError(msg)

    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel was used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF (radial basis function) kernel."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list[ndarray], classes: ndarray) -> None:
        """Fit the classifier to the training data."""
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations

        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the function to maximize."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_constraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_constraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Get the expected class of an observation."""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
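
# --- Added usage sketch (illustrative; not part of the original module) ---
# Train the SVC above on four linearly separable points and classify two of them.
if __name__ == "__main__":
    xs = [
        np.asarray([0.0, 1.0]),
        np.asarray([0.0, 2.0]),
        np.asarray([1.0, 1.0]),
        np.asarray([1.0, 2.0]),
    ]
    ys = np.asarray([1, 1, -1, -1])
    svc = SVC()
    svc.fit(xs, ys)
    print(svc.predict(np.asarray([0.0, 1.0])))  # expected: 1
    print(svc.predict(np.asarray([1.0, 1.0])))  # expected: -1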
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import json
import os
import subprocess
import unittest
from ast import literal_eval

import pytest
from parameterized import parameterized_class

from . import is_sagemaker_available


if is_sagemaker_available():
    from sagemaker import Session, TrainingJobAnalytics
    from sagemaker.huggingface import HuggingFace


@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.g4dn.xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9},
        },
    ]
)
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator and run training
        estimator = self.create_estimator()
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
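
# Added note: these tests are gated behind the TEST_SAGEMAKER environment variable
# checked in the skipif marker above, e.g. (invocation path is illustrative):
#   TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker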
from __future__ import annotations


def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm for finding a pattern within a piece of text
    with complexity O(n + m).

    1) Preprocess the pattern to identify any suffixes that are identical to
       prefixes; this tells us where to continue from on a mismatch.
    2) Step through the text one character at a time, comparing it to a character
       in the pattern and updating our location within the pattern if necessary.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """Calculates the new index we should go to if we fail a comparison."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
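
# Added sketch: inspect the failure (prefix) function directly; for "ABABX" the
# array is [0, 0, 1, 2, 0], i.e. after matching "ABAB" a mismatch resumes at index 2.
if __name__ == "__main__":
    print(get_failure_array("ABABX"))  # [0, 0, 1, 2, 0]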
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps: jnp.ndarray,
    embedding_dim: int,
    freq_shift: float = 1,
    min_timescale: float = 1,
    max_timescale: float = 1.0e4,
    flip_sin_to_cos: bool = False,
    scale: float = 1.0,
) -> jnp.ndarray:
    """Returns the positional encoding (same as Tensor2Tensor)."""
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    r"""Time step embedding: two Dense layers with a silu activation in between."""

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    r"""Wraps `get_sinusoidal_embeddings` as a Flax module."""

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
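
# Added usage sketch (illustrative): embed four timesteps into 32 dimensions.
if __name__ == "__main__":
    import jax

    ts = jnp.asarray([0.0, 1.0, 10.0, 100.0])
    print(get_sinusoidal_embeddings(ts, embedding_dim=32).shape)  # (4, 32)

    model = FlaxTimesteps(dim=32)
    params = model.init(jax.random.PRNGKey(0), ts)
    print(model.apply(params, ts).shape)  # (4, 32)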
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    """Constructs a CLIP image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image so that its shortest edge matches `size["shortest_edge"]`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center crop an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image by a scale factor, e.g. 1/255."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess an image or batch of images into model-ready pixel values."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
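
# Added usage sketch (comments only, since this module uses package-relative imports;
# the output shape assumes the default 224x224 center crop):
#
#     from PIL import Image
#     processor = CLIPImageProcessor()
#     batch = processor.preprocess(Image.new("RGB", (640, 480)), return_tensors="np")
#     batch["pixel_values"].shape  # (1, 3, 224, 224)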
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """
    Return the alternative arrangement of the two strings.

    >>> alternative_string_arrange("ABCD", "XY")
    'AXBYCD'
    >>> alternative_string_arrange("AB", "XYZ")
    'AXBYZ'
    >>> alternative_string_arrange("ABC", "")
    'ABC'
    """
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
import argparse
import os

import torch

from transformers import FlavaImageCodebook, FlavaImageCodebookConfig


def rreplace(s, old, new, occurrence):
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Copy/paste/tweak the DALL-E encoder weights to the transformers design."""
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
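
# Added usage note (script name and paths are hypothetical):
#   python convert_flava_codebook_to_pytorch.py --checkpoint_path ./encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook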
def bin_to_octal(bin_string: str) -> str:
    """
    Convert a binary value to its octal equivalent.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101010011")
    '52523'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
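
# Added worked example: "110101" splits into groups "110" and "101";
# 110 -> 4 + 2 + 0 = 6 and 101 -> 4 + 0 + 1 = 5, so bin_to_octal("110101") == "65".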
import argparse

from torch import nn

# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
    ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
    XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)

from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging


logger = logging.get_logger(__name__)
logging.set_verbosity_info()


def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Copy/paste/tweak the old ProphetNet checkpoint weights into the new model structure."""
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute != "":
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
return params[F"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int]="attention" ):
__lowercase : Optional[int] = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
__lowercase : Dict = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__lowercase : str = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
__lowercase : Union[str, Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__lowercase : Any = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
__lowercase : Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__lowercase : Any = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
__lowercase : Optional[Any] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]=False ):
if split_mlp_wi:
__lowercase : Any = params[F"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
__lowercase : List[Any] = params[F"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
__lowercase : Tuple = (wi_a, wi_a)
else:
__lowercase : Any = params[F"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
__lowercase : str = params[F"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int] ):
return params[F"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def snake_case_ ( lowerCAmelCase_ : List[str] , *, lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple = False ):
__lowercase : Optional[Any] = traverse_util.flatten_dict(variables["""target"""] )
__lowercase : Tuple = {'/'.join(_UpperCAmelCase ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__lowercase : Optional[Any] = 'encoder/encoder/mlp/wi_0/kernel' in old
print("""Split MLP:""" , _UpperCAmelCase )
__lowercase : int = collections.OrderedDict()
# Shared embeddings.
__lowercase : List[Any] = old['token_embedder/embedding']
# Encoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__lowercase : Dict = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , """encoder""" , """pre_attention_layer_norm""" )
__lowercase : Tuple = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , """encoder""" , """attention""" )
__lowercase : Optional[int] = layer_norm
__lowercase : str = k.T
__lowercase : Union[str, Any] = o.T
__lowercase : Any = q.T
__lowercase : Dict = v.T
# Block i, layer 1 (MLP).
__lowercase : Optional[Any] = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , """encoder""" , """pre_mlp_layer_norm""" )
__lowercase : List[str] = tax_mlp_lookup(_UpperCAmelCase , _UpperCAmelCase , """encoder""" , _UpperCAmelCase )
__lowercase : List[Any] = layer_norm
if split_mlp_wi:
__lowercase : str = wi[0].T
__lowercase : Any = wi[1].T
else:
__lowercase : int = wi.T
__lowercase : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__lowercase : str = tax_relpos_bias_lookup(
_UpperCAmelCase , _UpperCAmelCase , """encoder""" ).T
__lowercase : int = old['encoder/encoder_norm/scale']
if not scalable_attention:
__lowercase : Optional[Any] = tax_relpos_bias_lookup(
_UpperCAmelCase , 0 , """encoder""" ).T
__lowercase : List[str] = tax_relpos_bias_lookup(
_UpperCAmelCase , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(_UpperCAmelCase ):
# Block i, layer 0 (Self Attention).
__lowercase : Optional[int] = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , """decoder""" , """pre_self_attention_layer_norm""" )
__lowercase : Tuple = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , """decoder""" , """self_attention""" )
__lowercase : Dict = layer_norm
__lowercase : Any = k.T
__lowercase : str = o.T
__lowercase : List[Any] = q.T
__lowercase : Optional[int] = v.T
# Block i, layer 1 (Cross Attention).
__lowercase : Any = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , """decoder""" , """pre_cross_attention_layer_norm""" )
__lowercase : Dict = tax_attention_lookup(_UpperCAmelCase , _UpperCAmelCase , """decoder""" , """encoder_decoder_attention""" )
__lowercase : Optional[Any] = layer_norm
__lowercase : Union[str, Any] = k.T
__lowercase : List[Any] = o.T
__lowercase : Any = q.T
__lowercase : Dict = v.T
# Block i, layer 2 (MLP).
__lowercase : int = tax_layer_norm_lookup(_UpperCAmelCase , _UpperCAmelCase , """decoder""" , """pre_mlp_layer_norm""" )
__lowercase : Union[str, Any] = tax_mlp_lookup(_UpperCAmelCase , _UpperCAmelCase , """decoder""" , _UpperCAmelCase )
__lowercase : Optional[Any] = layer_norm
if split_mlp_wi:
__lowercase : Optional[int] = wi[0].T
__lowercase : List[str] = wi[1].T
else:
__lowercase : Any = wi.T
__lowercase : str = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__lowercase : str = tax_relpos_bias_lookup(_UpperCAmelCase , _UpperCAmelCase , """decoder""" ).T
__lowercase : List[Any] = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__lowercase : Union[str, Any] = old['decoder/logits_dense/kernel'].T
return new
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : Any ):
__lowercase : Optional[Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__lowercase : Union[str, Any] = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__lowercase : Optional[Any] = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
__lowercase : Optional[Any] = state_dict['shared.weight']
return state_dict
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : Any ):
__lowercase : int = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
__lowercase : Tuple = convert_tax_to_pytorch(
_UpperCAmelCase , num_layers=config.num_layers , is_encoder_only=_UpperCAmelCase , scalable_attention=_UpperCAmelCase )
__lowercase : int = make_state_dict(_UpperCAmelCase , _UpperCAmelCase )
model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str = False , lowerCAmelCase_ : Union[str, Any] = False , ):
__lowercase : str = MTaConfig.from_json_file(_UpperCAmelCase )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__lowercase : Optional[int] = UMTaEncoderModel(_UpperCAmelCase )
else:
__lowercase : Union[str, Any] = UMTaForConditionalGeneration(_UpperCAmelCase )
# Load weights from tf checkpoint
load_tax_weights_in_ta(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(_UpperCAmelCase )
# Verify that we can load the checkpoint.
model.from_pretrained(_UpperCAmelCase )
print("""Done""" )
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
lowerCamelCase : Dict = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
def solution(pence: int = 200) -> int:
    """Return the number of ways to make `pence` pence using any number of British coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
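
# Added worked example: with coins [1, 2] and pence = 4 the table evolves as
#   after coin 1: [1, 1, 1, 1, 1]
#   after coin 2: [1, 1, 2, 2, 3]
# so there are 3 ways to make 4 pence (1+1+1+1, 1+1+2, 2+2).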
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = parent
__lowercase : List[str] = out_indices if out_indices is not None else [4]
__lowercase : Optional[int] = stage_names
__lowercase : Any = out_features
__lowercase : Optional[Any] = backbone
__lowercase : Optional[Any] = batch_size
__lowercase : Union[str, Any] = image_size
__lowercase : List[str] = num_channels
__lowercase : str = use_pretrained_backbone
__lowercase : str = is_training
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : str = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TimmBackbone(config=__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Optional[Any] = model(__a )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase : str = config_and_inputs
__lowercase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
_A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
_A : List[Any] = False
_A : List[str] = False
_A : Any = False
_A : Optional[Any] = False
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = TimmBackboneModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = """resnet18"""
__lowercase : Optional[int] = """microsoft/resnet-18"""
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
__lowercase : Dict = AutoBackbone.from_pretrained(__a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
__lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
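# A standalone sketch of the timm-vs-transformers backbone comparison exercised
# above (an illustration, not part of the test suite; it assumes the timm extra
# is installed and both checkpoints are reachable):
if __name__ == "__main__":
    from transformers import AutoBackbone

    timm_backbone = AutoBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3))
    hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
    # Both expose the same stage metadata, as the tests above assert.
    print(timm_backbone.channels, hf_backbone.channels)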
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCamelCase : Tuple = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : str = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
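# A minimal sketch of what the lazy registration above buys a caller: attribute
# access on the package triggers _LazyModule to import the real submodule on
# demand, and backend-guarded names are simply absent when torch/flax is missing.
if __name__ == "__main__":
    import importlib

    pkg = importlib.import_module("transformers.models.speech_encoder_decoder")
    # True only when torch is installed; _LazyModule raises AttributeError otherwise.
    print(hasattr(pkg, "SpeechEncoderDecoderModel"))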
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : str = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCamelCase : Optional[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model , hf_model , is_headless ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    elif "running_mean" in name:
                        weight_type = """running_mean"""
                    elif "inv_freq" in name:
                        weight_type = """inv_freq"""
                    elif "running_var" in name:
                        weight_type = """running_var"""
                    elif "num_batches_tracked" in name:
                        weight_type = """num_batches_tracked"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act="""swish""" )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = """rotary"""
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["""<pad>"""] = 0
            vocab_dict["""<s>"""] = 1
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec , not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCamelCase : Any = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
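# Example invocation for the conversion script above (a sketch; the script name
# and all paths are placeholders to substitute with real locations):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-converted
#
# Pass --not_finetuned for a pretraining-only checkpoint (skips the CTC head,
# tokenizer, and processor export).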
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase : Optional[Any] = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
import sys
lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
def snake_case_ ( string_a : str , string_b : str ):
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
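    # Worked examples for the Hamming-distance helper above (classic pairs:
    # "karolin"/"kathrin" differ at three positions, "0000"/"0101" at two).
    assert snake_case_("karolin", "kathrin") == 3
    assert snake_case_("0000", "0101") == 2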
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict = logging.get_logger(__name__)
lowerCamelCase : int = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowerCAmelCase ( PretrainedConfig ):
    '''simple docstring'''
    _A : List[Any] = '''roc_bert'''
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , use_cache=True , pad_token_id=0 , position_embedding_type="absolute" , classifier_dropout=None , enable_pronunciation=True , enable_shape=True , pronunciation_embed_dim=768 , pronunciation_vocab_size=910 , shape_embed_dim=512 , shape_vocab_size=24858 , concat_input=True , **kwargs , ) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id , **kwargs )
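# A minimal sketch of instantiating the configuration above; defaults follow the
# __init__ signature, and any keyword can be overridden:
if __name__ == "__main__":
    config = lowerCAmelCase(enable_pronunciation=False)
    print(config.vocab_size)  # 30522 by default
    print(config.enable_pronunciation)  # False after the override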
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple( x ):
    if isinstance(x , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class lowerCAmelCase :
'''simple docstring'''
def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ) -> None:
        """simple docstring"""
        diff = np.abs((a - b) ).max()
        self.assertLessEqual(diff , tol , F"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
        out_1 = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1E-3 )
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
__lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Union[str, Any] = model(
input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
__lowercase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
__lowercase : List[str] = to_atuple(vision_model.config.patch_size )
__lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase : Dict = output.text_model_output.attentions
self.assertEqual(len(__a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pt_model.to(__a )
pt_model.eval()
# prepare inputs
__lowercase : Union[str, Any] = inputs_dict
__lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
__lowercase : Tuple = fx_model(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__a )
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
__lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__a )
__lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
pt_model_loaded.to(__a )
pt_model_loaded.eval()
with torch.no_grad():
__lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = VisionTextDualEncoderModel(__a )
__lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
__lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
__lowercase : Any = fx_state
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
"""simple docstring"""
__lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
__lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__a )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**__a )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__a )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
__lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
__lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
__lowercase : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(__a , __a , __a )
self.check_equivalence_flax_to_pt(__a , __a , __a )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
__lowercase : Dict = model_a(**__a )
        out_1 = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__a )
__lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Optional[int] = model_a(**__a )
            out_2 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : int = 13
__lowercase : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : Tuple = random_attention_mask([batch_size, 4] )
__lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxViTModel(__a )
__lowercase : List[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = FlaxViTModelTester(self )
__lowercase : str = FlaxBertModelTester(self )
__lowercase : List[str] = vit_model_tester.prepare_config_and_inputs()
__lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Optional[int] = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : Tuple = 13
__lowercase : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : List[Any] = random_attention_mask([batch_size, 4] )
__lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = FlaxCLIPVisionModel(__a )
__lowercase : Optional[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = FlaxCLIPVisionModelTester(self )
__lowercase : Optional[Any] = FlaxBertModelTester(self )
__lowercase : Any = clip_model_tester.prepare_config_and_inputs()
__lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Dict = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
__lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase : Tuple = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , padding=__a , return_tensors="""np""" )
__lowercase : Optional[int] = model(**__a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
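# A condensed, standalone version of the integration test above (a sketch; it
# assumes network access to the clip-italian checkpoint and the local COCO fixture):
if __name__ == "__main__":
    model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
    processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np")
    print(model(**inputs).logits_per_image)  # shape: (num_images, num_texts)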
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = XGLMTokenizer
_A : List[str] = XGLMTokenizerFast
_A : Union[str, Any] = True
_A : int = True
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
        token = """<pad>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(len(vocab_keys ) , 1008 )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer( self ) -> XGLMTokenizer:
"""simple docstring"""
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB , f.name )
            tokenizer = XGLMTokenizer(f.name , keep_accents=True )
            pickled_tokenizer = pickle.dumps(tokenizer )
        pickle.loads(pickled_tokenizer )
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = """I was born in 92000, and this is falsé."""
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
        text = """Hello World!"""
        expected_ids = [2, 31227, 4447, 35]
        self.assertListEqual(expected_ids , self.big_tokenizer.encode(text ) )
@slow
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
        text = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
)
# fmt: off
        expected_ids = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(expected_ids , self.big_tokenizer.encode(text ) )
@slow
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
        # fmt: off
        expected_encoding = {
"input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name="""facebook/xglm-564M""" , padding=False , )
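# A minimal sketch of using the slow tokenizer exercised above (assumes the
# facebook/xglm-564M checkpoint can be downloaded):
if __name__ == "__main__":
    tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    ids = tok.encode("Hello World!")
    print(ids)  # expected [2, 31227, 4447, 35] per the test above
    print(tok.decode(ids, skip_special_tokens=True))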
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
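# The guards above degrade gracefully: when a backend is missing, dummy objects
# are star-imported in its place, so imports still succeed and only raise when a
# guarded class is actually used. A sketch of picking a scheduler behind the
# torch guard (assumes torch is installed):
if __name__ == "__main__":
    scheduler = DDIMScheduler(num_train_timesteps=1000)
    print(type(scheduler).__name__)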
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : Union[str, Any] = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Optional[Any] = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Tuple = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 2_56
def get_min_hash( tokens : List[str] ):
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
def get_tokens( code : str ):
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
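# Worked example for the two helpers above: NON_ALPHA tokenization feeding the
# MinHash builder (get_min_hash returns None below MIN_NUM_TOKENS tokens):
if __name__ == "__main__":
    short = get_tokens("def f(x): return x")
    longer = get_tokens("def scale(values, factor): scaled = [item * factor for item in values]; total = sum(scaled); return scaled, total")
    print(get_min_hash(list(short)))   # None: fewer than MIN_NUM_TOKENS distinct tokens
    print(get_min_hash(list(longer)))  # a datasketch MinHash over the token set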
class DuplicationIndex:
    '''simple docstring'''
    def __init__( self , * , duplication_jaccard_threshold : float = 0.85 ) -> None:
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add( self , code_key : Tuple , min_hash : MinHash ) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}" )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters( self ) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save( self , filepath ) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash( element ):
    index , data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter( dataset_iterator : Type[Dataset] ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters( dataset_iterator : Type[Dataset] , jaccard_threshold : float ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity( code_a : str , code_b : str ):
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
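# Worked example for jaccard_similarity: token-set overlap over the union.
# {def, add, return} is shared out of seven distinct tokens, giving 3/7.
if __name__ == "__main__":
    print(jaccard_similarity("def add(a, b): return a + b", "def add(x, y): return x + y"))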
_shared_dataset = None
def _find_cluster_extremes_shared( cluster , jaccard_threshold ):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["""base_index"""]]["""content"""]
        for element2 in extremes:
            code2 = _shared_dataset[element2["""base_index"""]]["""content"""]
            if jaccard_similarity(code1 , code2 ) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1 )
    return extremes
def find_extremes( cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ):
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(F"Original dataset size: {len(dataset )}" )
    print(F"Number of duplicate clusters: {len(duplicate_clusters )}" )
    print(F"Files in duplicate cluster: {len(duplicate_indices )}" )
    print(F"Unique files in duplicate cluster: {len(extreme_dict )}" )
    print(F"Filtered dataset size: {len(ds_filter )}" )
    return ds_filter, duplicate_clusters
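# End-to-end sketch of the deduplication entry point above on a toy in-memory
# dataset (column names follow what the helpers expect; these snippets are too
# short to clear MIN_NUM_TOKENS, so nothing is actually removed):
if __name__ == "__main__":
    toy = Dataset.from_dict(
        {
            "content": ["def f(): pass", "def f(): pass", "def g(): pass"],
            "repo_name": ["r1", "r2", "r3"],
            "path": ["a.py", "b.py", "c.py"],
        }
    )
    filtered, clusters = deduplicate_dataset(toy, jaccard_threshold=0.85)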
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
lowerCamelCase : Optional[int] = """base_with_context"""
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ):
__lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
__lowercase : Dict = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
__lowercase : Optional[int] = weights[F"layers_{lyr_num}"]
__lowercase : int = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
__lowercase : List[str] = ly_weight["""attention"""]
__lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__lowercase : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__lowercase : int = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__lowercase : Any = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__lowercase : Dict = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def snake_case_ ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ):
__lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
__lowercase : Dict = nn.Parameter(
        torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
__lowercase : int = weights[F"layers_{lyr_num}"]
__lowercase : int = ly_weight["""attention"""]
__lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__lowercase : str = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__lowercase : int = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__lowercase : Any = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
__lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__lowercase : Dict = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__lowercase : str = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder(weights, model):
    __lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    __lowercase : Tuple = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    __lowercase : Optional[Any] = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
__lowercase : Tuple = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
__lowercase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
__lowercase : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
        attention_weights = ly_weight["self_attention"]
__lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__lowercase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
__lowercase : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
__lowercase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
__lowercase : Any = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
__lowercase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
__lowercase : str = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
__lowercase : int = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
__lowercase : str = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
__lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
__lowercase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
__lowercase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
__lowercase : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
__lowercase : Any = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
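# The decoder loader above also ports FiLM ("feature-wise linear modulation")
# projections: a conditioning vector is mapped to per-channel (scale, shift)
# pairs that modulate the hidden states. A minimal sketch of the idea — sizes
# are made up and this is not the diffusers implementation itself:
class _FilmExample(nn.Module):
    def __init__(self, cond_dim: int, channels: int):
        super().__init__()
        # one projection produces both scale and shift, like the DenseGeneral_0 kernels above
        self.proj = nn.Linear(cond_dim, 2 * channels)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = self.proj(cond).chunk(2, dim=-1)
        return x * (1 + scale) + shift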
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument(
        "--checkpoint_path",
        default=f"{MODEL}/checkpoint_500000",
        type=str,
        required=False,
        help="Path to the original jax model checkpoint.",
    )
    args = parser.parse_args()
    main(args)
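# Example invocation (hypothetical paths; `main` reads `../config.gin` relative
# to the checkpoint directory, so the T5X checkpoint must sit one level below it):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion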
| 366
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"
    def __init__(self, image_processor, feature_extractor):
        """simple docstring"""
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs):
        """simple docstring"""
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
    @property
    def model_input_names(self):
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
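# Usage sketch for the processor above (hedged: the checkpoint name and input
# shapes are illustrative assumptions, not pinned by this file):
#
#   import numpy as np
#
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   video = list(np.random.rand(8, 3, 224, 224))  # 8 RGB frames
#   audio = list(np.random.rand(10_000))          # mono waveform samples
#   inputs = processor(images=video, audio=audio, sampling_rate=44_100, return_tensors="pt")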
| 306
| 0
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
'''simple docstring'''
    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
"""feature-extraction""": TFFlaubertModel,
"""fill-mask""": TFFlaubertWithLMHeadModel,
"""question-answering""": TFFlaubertForQuestionAnsweringSimple,
"""text-classification""": TFFlaubertForSequenceClassification,
"""token-classification""": TFFlaubertForTokenClassification,
"""zero-shot""": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        """simple docstring"""
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_output_embeds_base_model(self):
        """simple docstring"""
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
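# The multiple-choice test above relies on a tiling trick: (batch, seq) tensors
# are expanded to (batch, num_choices, seq) by repeating each example once per
# choice. A minimal sketch of that reshaping — it assumes TF is available, just
# like the guarded imports at the top of this file:
def _tile_for_multiple_choice_example(input_ids, num_choices):
    # (batch, seq) -> (batch, 1, seq) -> (batch, num_choices, seq)
    return tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))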
| 367
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=33, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
_A : Optional[Any] = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Optional[Any] = True
    def setUp(self):
        """simple docstring"""
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        """simple docstring"""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """simple docstring"""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)
        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
'''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        """simple docstring"""
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
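# The two position-id tests above pin down the ESM/RoBERTa indexing convention:
# real tokens are numbered from padding_idx + 1 upward and padding positions keep
# padding_idx itself. A minimal re-implementation of that rule, for illustration
# only (the library ships its own `create_position_ids_from_input_ids`):
def _position_ids_example(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    # cumulative count of non-pad tokens; multiplying by the mask pins pads to 0
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx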
| 306
| 0
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[int] = """ \tHeLLo!how \n Are yoU? """
__lowercase : Any = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
__lowercase : Optional[Any] = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase )
__lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Any = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase )
__lowercase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = """I was born in 92000, and this is falsé."""
__lowercase : int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowercase : int = DebertaVaTokenizer(__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Optional[int] = DebertaVaTokenizerFast(__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = """I was born in 92000, and this is falsé."""
__lowercase : Any = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowercase : Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = """I was born in 92000, and this is falsé."""
__lowercase : List[Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowercase : Tuple = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = """I was born in 92000, and this is falsé."""
__lowercase : int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
__lowercase : List[str] = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : List[str] = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = """ \tHeLLo!how \n Are yoU? """
__lowercase : Dict = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
__lowercase : int = DebertaVaTokenizer(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Any = DebertaVaTokenizerFast(__lowerCamelCase , do_lower_case=__lowerCamelCase , split_by_punct=__lowerCamelCase )
__lowercase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
__lowercase : int = self.get_tokenizer()
__lowercase : str = self.get_rust_tokenizer()
__lowercase : Dict = """I was born in 92000, and this is falsé."""
__lowercase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
__lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase ) )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : List[str] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
__lowercase : Optional[int] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : int = self.get_rust_tokenizer()
__lowercase : Tuple = tokenizer.encode(__lowerCamelCase )
__lowercase : Dict = rust_tokenizer.encode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[int] = """This is a test"""
__lowercase : str = [13, 1, 4398, 25, 21, 1289]
__lowercase : int = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
__lowercase : Any = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
__lowercase : str = DebertaVaTokenizer(__lowerCamelCase , keep_accents=__lowerCamelCase )
__lowercase : Union[str, Any] = DebertaVaTokenizerFast(__lowerCamelCase , keep_accents=__lowerCamelCase )
__lowercase : Optional[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Union[str, Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Any = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Optional[Any] = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : int = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
# fmt: off
__lowercase : Optional[Any] = """I was born in 92000, and this is falsé."""
__lowercase : Any = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
__lowercase : Union[str, Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
__lowercase : Optional[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
__lowercase : str = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Any = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : List[Any] = rust_tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : int = rust_tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
__lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = DebertaVaTokenizer(__lowerCamelCase )
__lowercase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
__lowercase : Any = tokenizer.encode("""multi-sequence build""" )
__lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
__lowercase : str = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , __lowerCamelCase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , __lowerCamelCase , )
@slow
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = {"""input_ids""": [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
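# Throughout the expected token lists above, "▁" (U+2581) is the SentencePiece
# word-boundary marker: detokenization joins the pieces and turns "▁" back into
# spaces. A tiny sketch of that round-trip rule (illustrative only, not the
# tokenizer's actual implementation):
def _sp_detokenize_example(pieces):
    return "".join(pieces).replace("\u2581", " ").strip()


# e.g. _sp_detokenize_example(["▁this", "▁is", "▁a", "▁test"]) == "this is a test"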
| 368
|
def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
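# Why the test in is_pentagonal works: P_n = n * (3n - 1) / 2, so 3n^2 - n - 2x = 0
# and the positive root is n = (1 + sqrt(1 + 24x)) / 6; x is pentagonal exactly
# when that n is a whole number. Note 1 + 24 * P_n = (6n - 1)^2, so the square
# root is exact for pentagonal inputs. Quick sanity check:
assert all(is_pentagonal((k * (3 * k - 1)) // 2) for k in range(1, 100))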
if __name__ == "__main__":
    print(f"{solution() = }")
| 306
| 0
|
import numpy
class TwoHiddenLayerNeuralNetwork:
    '''simple docstring'''

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """simple docstring"""
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)
    def feedforward(self) -> numpy.ndarray:
        """simple docstring"""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output
    def back_propagation(self) -> None:
        """simple docstring"""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )
    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """simple docstring"""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")
    def predict(self, input_arr: numpy.ndarray) -> int:
        """simple docstring"""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))
def example() -> int:
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    test_output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(input_array=test_input, output_array=test_output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=test_output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
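# The truth table used in example() is the 3-bit XOR / parity function — a classic
# target that is not linearly separable, which is why this network needs hidden
# layers at all. Sanity check of the table above:
def _parity(bits) -> int:
    return bits[0] ^ bits[1] ^ bits[2]


assert [
    _parity(b)
    for b in ([0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1])
] == [0, 1, 1, 0, 1, 0, 0, 1]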
if __name__ == "__main__":
    example()
| 369
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    '''simple docstring'''

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
    def test_full_loop_device(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
    def test_full_loop_device_karras_sigmas(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
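# Note on the sampling loop used throughout these tests: sigma-based schedulers require
# the current sample to be rescaled with scale_model_input() before each model call, and
# scheduler.step() then converts the model output into the next, less noisy sample.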
| 306
| 0
|
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """simple docstring"""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """simple docstring"""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """simple docstring"""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations to run."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
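# Minimal sketch (illustrative, not part of the original script) of the same
# filter/sort/select pattern on a toy in-memory Dataset:
#   from datasets import Dataset
#   toy = Dataset.from_dict({"prediction": [0, 1], "probability": [0.9, 0.4]})
#   kept = toy.filter(lambda example: example["probability"] > 0.5)  # keeps row 0 only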
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
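# Example invocation (file paths and the iteration count are assumptions):
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/infer.csv",
#       output_dir="output",
#       max_selftrain_iterations=3,
#   )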
| 370
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
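# Note: serializing the engine to disk means later runs can deserialize it with
# trt.Runtime(TRT_LOGGER).deserialize_cuda_engine(...) (as done further below) instead
# of re-parsing the ONNX graph and re-running the builder optimizations each time.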
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
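# Note on the timing above: time.time() brackets host-side enqueueing, but because
# stream.synchronize() runs before end_time is read, the measured interval also
# covers the device-side execution of every operation queued on the stream.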
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
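# A minimal sketch (the tokenizer checkpoint below is an assumption; any fast tokenizer
# behaves the same) of what return_overflowing_tokens produces for one long example:
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=True)
#   enc = tok("What is X?", "very long context " * 200, truncation="only_second",
#             max_length=64, stride=16, return_overflowing_tokens=True,
#             return_offsets_mapping=True, padding="max_length")
#   len(enc["input_ids"])                # > 1: one example became several features
#   enc["overflow_to_sample_mapping"]    # [0, 0, ...]: each feature maps back to sample 0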
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
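# Note: for SQuAD v2 the squad_v2 metric expects a "no_answer_probability" field per
# prediction; a constant 0.0 is used above because unanswerable questions are already
# resolved inside postprocess_qa_predictions via null_score_diff_threshold.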
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
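# Sketch of why the padding step matters: accelerator.gather concatenates tensors from
# all processes along dim 0, which requires identical trailing dimensions everywhere;
# pad_across_processes(..., dim=1, pad_index=-100) equalizes the sequence length first,
# and nested_truncate then trims the gathered predictions back to len(eval_dataset).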
| 306
| 0
|
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class CpmTokenizer(PreTrainedTokenizer):
    '''simple docstring'''

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        """simple docstring"""
        return len(self.sp_model)

    def get_vocab(self):
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        """simple docstring"""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """simple docstring"""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """simple docstring"""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        """simple docstring"""
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
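# Minimal sketch of the whitespace round-trip used by this tokenizer (pure Python):
#   translator = str.maketrans(" \n", "\u2582\u2583")
#   encoded = "hello world\n".translate(translator)  # " " -> U+2582, "\n" -> U+2583
#   decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")
#   assert decoded == "hello world\n"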
| 371
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
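# Illustrative usage (the values below are assumptions, not recommendations):
#   config = NllbMoeConfig(num_experts=8, expert_capacity=32, router_dtype="bfloat16")
#   config.router_dtype  # "bfloat16"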
| 306
| 0
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
| 350
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
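# Note: with this lazy-module pattern, `import transformers.models.poolformer` stays cheap;
# heavy dependencies (torch, vision) are only imported when a listed attribute is first
# accessed, while the TYPE_CHECKING branch keeps static type checkers and IDEs working.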
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"
    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
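# Usage sketch (backbone name is an example): any timm architecture can back the config.
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))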
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n in ascending order.
    >>> prime_factors(360)
    [2, 2, 2, 3, 3, 5]
    """
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"blocks.{i}.norm1.weight", F"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"blocks.{i}.norm1.bias", F"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((F"blocks.{i}.attn.proj.weight", F"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((F"blocks.{i}.attn.proj.bias", F"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"blocks.{i}.norm2.weight", F"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"blocks.{i}.norm2.bias", F"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc1.weight", F"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc1.bias", F"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"blocks.{i}.mlp.fc2.weight", F"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"blocks.{i}.mlp.fc2.bias", F"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the timm model's weights to our ViT structure."""
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
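# Example invocation (script name and output directory are illustrative):
#   python convert_vit_timm_to_pytorch.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit_base_patch16_224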
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 10_24,
'facebook/nllb-200-distilled-600M': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            # In legacy mode, the language code is appended after eos instead of prefixed.
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
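# Usage sketch (assumes the checkpoint is available from the Hub):
#   tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#   inputs = tokenizer("Hello world", return_tensors="pt")
# For generation, the target language is forced via tokenizer.lang_code_to_id["fra_Latn"].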
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be abbreviated to string `b`.
    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_whisper_fast"] = ["WhisperTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    """Decorator that warns at call time that the decorated API is experimental."""
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)
    return _inner_fn
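# Usage sketch (the decorated function is hypothetical):
#   @experimental
#   def new_api():
#       return 42
# Calling new_api() emits a UserWarning before running the wrapped function.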
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt search: return True if `pattern` occurs in `text`, in O(n + m)."""
    # 1) Construct the failure array
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    """Calculate the failure array: for each prefix, the length of its longest proper prefix-suffix."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)
    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
import re
def is_sri_lankan_phone_number(phone: str) -> bool:
    """Determine whether the given string is a valid Sri Lankan mobile phone number."""
    pattern = re.compile(
        r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$")
    return bool(re.search(pattern, phone))
if __name__ == "__main__":
    phone = "0094702343221"
    print(is_sri_lankan_phone_number(phone))
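# Illustrative inputs checked against the pattern above:
#   is_sri_lankan_phone_number("0770123456")   -> True   (local 07X mobile prefix)
#   is_sri_lankan_phone_number("+94771234567") -> True   (international +94 prefix)
#   is_sri_lankan_phone_number("0730123456")   -> False  (3 is not an accepted carrier digit)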
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        # Resize so the shortest edge matches size["shortest_edge"], keeping the aspect ratio.
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
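# Usage sketch: reproduces CLIP's preprocessing (shortest edge -> 224, center crop,
# rescale to [0, 1], normalize with the OpenAI CLIP mean/std).
#   image_processor = CLIPImageProcessor()
#   batch = image_processor(images=pil_image, return_tensors="pt")  # pil_image: any PIL image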
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t):
            return math.exp(t * -12.0)
    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2"):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # UnCLIP does not rescale model inputs; kept for API compatibility with other schedulers.
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        # Note: this scheduler uses a slightly different step ratio than other diffusers
        # schedulers, to mimic the original karlo implementation.
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(self, model_output, timestep, sample, prev_timestep=None, generator=None, return_dict: bool = True):
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep,
            )
            if self.variance_type == "fixed_small_log":
                # in this mode _get_variance already returned a standard deviation
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Forward diffusion: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
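# Denoising-loop sketch (the `unet` call is a hypothetical epsilon-prediction model;
# shapes are illustrative):
#   scheduler = UnCLIPScheduler()
#   scheduler.set_timesteps(25, device="cpu")
#   sample = torch.randn(1, 3, 64, 64)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample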
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    # replace the last `occurrence` occurrences of `old` in `s` with `new`
    li = s.rsplit(old, occurrence)
    return new.join(li)
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict):
    upgrade = {}
    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")
        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")
        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    """Copy/paste/tweak the DALL-E codebook weights to the transformers design."""
    from dall_e import Encoder
    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)
    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
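# Example invocation (script name and paths are illustrative):
#   python convert_dalle_to_flava_codebook.py --checkpoint_path ./encoder.pkl \
#       --pytorch_dump_folder_path ./flava_codebook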
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12, out_features=["stage2", "stage3", "stage4"], )
    config = DetaConfig(
        backbone_config=backbone_config, num_queries=900, encoder_ffn_dim=2048, decoder_ffn_dim=2048, num_feature_levels=5, assign_first_stage=True, with_box_refine=True, two_stage=True, )
    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"
    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
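# Illustration (not part of the conversion; the helper name `_split_qkv_demo` is
# invented for this sketch): the fused Swin qkv matrix handled above stacks the
# query, key and value projections row-wise, so equal slices of height `dim`
# recover the three projections.
def _split_qkv_demo(in_proj_weight, dim):
    query = in_proj_weight[:dim, :]
    key = in_proj_weight[dim : dim * 2, :]
    value = in_proj_weight[-dim:, :]
    return query, key, value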
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[F"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(F"Model name {model_name} not supported")
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # print original state dict
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(F"jozhang97/{model_name}")
        processor.push_to_hub(F"jozhang97/{model_name}")
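# Usage sketch (illustrative; the script file name is an assumption, the flags
# match the argparse definitions below):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub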
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 358
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(F"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
if not is_key_init:
raise ValueError(F"{key} was not correctly initialized!" )
print(F"Saving model to {pytorch_dump_folder_path}" )
    prophet.save_pretrained(pytorch_dump_folder_path)
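# Illustration: a missing key such as "prophetnet.decoder.layers.0.self_attn.query_proj.weight"
# is split on "." and walked attribute by attribute; the `mapping` table above translates
# new-style names (e.g. "self_attn") back to old-style ones (e.g. "ngram_self_attn") so the
# matching parameter can be copied from the old checkpoint.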
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")
    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, head_mask) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 359
|
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
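# Sanity-check helper (illustrative, not part of the original solution): counts
# order-independent coin combinations recursively and should agree with the
# dynamic-programming table built in `solution` for small inputs.
def _brute_force_ways(pence: int, coins: list, max_coin: int = 200) -> int:
    if pence == 0:
        return 1
    return sum(_brute_force_ways(pence - c, coins, c) for c in coins if c <= min(pence, max_coin))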
if __name__ == "__main__":
    assert solution(200) == 73682
| 306
| 0
|
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for the position where val belongs in collection[:i]
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift larger elements one slot to the right, then insert val
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
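# Quick self-check (illustrative): the binary search above finds the insertion
# point, then the shift loop makes room, e.g.
#   binary_insertion_sort([5, 2, 4, 1]) == [1, 2, 4, 5]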
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 360
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        out_features=None,
        stage_names=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        is_training=True,
        use_pretrained_backbone=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values
    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )
    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass
    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass
    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass
    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass
    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass
    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass
    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass
    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass
    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass
    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_no_missing_tied_weights(self):
        pass
    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass
    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_torchscript_output_attentions(self):
        pass
    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)
        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True)
        self.assertIsNotNone(hidden_states.grad)
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
| 306
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )
    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    F"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(F"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs
    def _forward(self, model_inputs, generate_kwargs=None):
        # batched GIT inputs with no prompt arrive as a list of Nones; collapse them back to None
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs
    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
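# Usage sketch (illustrative, not part of the module; the checkpoint name below
# is an assumption about a typical image-captioning model):
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"generated_text": "..."}]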
| 361
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
logger.warning(F"Unused weights: {unused_weights}" )
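# Illustration: how a wildcard MAPPING entry is resolved above. For a fairseq key
# like "encoder.layers.3.self_attn.linear_q", `layer_index` is extracted from the
# name and substituted into the template:
#   "encoder.layers.*.self_attn.linear_q".replace("*", "3")
#   # -> "encoder.layers.3.self_attn.linear_q"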
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"{full_name} has size {value.shape}, but"
                    F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 306
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)
    @property
    def max_position_embeddings(self):
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1
    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
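# Usage sketch (illustrative): the attribute_map defined above lets generic code
# read standard names from this config.
#   config = TransfoXLConfig(d_model=256, n_layer=4, n_head=4)
#   config.hidden_size              # -> 256, resolved via attribute_map
#   config.max_position_embeddings  # -> -1, i.e. no sequence length limit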
| 362
|
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
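# Example (illustrative): "karolin" and "kathrin" differ at three positions.
assert hamming_distance("karolin", "kathrin") == 3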
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 0
|
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
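# Worked example (illustrative): with pad_token_id=0 and decoder_start_token_id=0,
# input_ids [[5, 6, -100]] becomes [[0, 5, 6]] -- every token moves one slot to the
# right, the decoder start token fills position 0, and any -100 sentinel left in the
# shifted result would be replaced by the pad id.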
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config
class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
| 363
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple ( x ):
    if isinstance(x , collections.abc.Iterable ):
        return x
    return (x, x)
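# Illustrative: to_atuple(224) == (224, 224), while to_atuple((224, 196)) is
# returned unchanged -- this lets image/patch sizes below be given as an int
# or as a (height, width) pair.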
@require_flax
class lowerCAmelCase :
'''simple docstring'''
    def get_vision_text_model( self , vision_config , text_config ):
        """simple docstring"""
        pass
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pass
    def get_pretrained_model_and_inputs( self ):
        """simple docstring"""
        pass
    def assert_almost_equals( self , a : np.ndarray , b : np.ndarray , tol : float ) -> None:
        """simple docstring"""
        diff = np.abs(a - b ).max()
        self.assertLessEqual(diff , tol , F"Difference between torch and flax is {diff} (>= {tol})." )
    def check_model_from_pretrained_configs( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        model = FlaxVisionTextDualEncoderModel(config )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
    def check_vision_text_dual_encoder_from_pretrained( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def check_save_load( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
        out_1 = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname )
            after_output = model(input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask )
            out_2 = after_output[0]
            max_diff = np.amax(np.abs(out_2 - out_1 ) )
            self.assertLessEqual(max_diff , 1E-3 )
    def check_vision_text_output_attention( self , text_config , input_ids , attention_mask , vision_config , pixel_values=None , **kwargs ):
        """simple docstring"""
        vision_model , text_model = self.get_vision_text_model(vision_config , text_config )
        kwargs = {"""vision_model""": vision_model, """text_model""": text_model}
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs )
        output = model(
            input_ids=input_ids , pixel_values=pixel_values , attention_mask=attention_mask , output_attentions=True )
        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_atuple(vision_model.config.image_size )
        patch_size = to_atuple(vision_model.config.patch_size )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def check_pt_flax_equivalence( self , pt_model , fx_model , inputs_dict ):
        """simple docstring"""
        pt_model.to(torch_device )
        pt_model.eval()
        # prepare inputs
        flax_inputs = inputs_dict
        pt_inputs = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            pt_outputs = pt_model(**pt_inputs ).to_tuple()
        fx_outputs = fx_model(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output , pt_output.numpy() , 4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(tmpdirname )
            fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True )
        fx_outputs_loaded = fx_model_loaded(**inputs_dict ).to_tuple()
        self.assertEqual(len(fx_outputs_loaded ) , len(pt_outputs ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
            self.assert_almost_equals(fx_output_loaded , pt_output.numpy() , 4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(tmpdirname )
            pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True )
        pt_model_loaded.to(torch_device )
        pt_model_loaded.eval()
        with torch.no_grad():
            pt_outputs_loaded = pt_model_loaded(**pt_inputs ).to_tuple()
        self.assertEqual(len(fx_outputs ) , len(pt_outputs_loaded ) , """Output lengths differ between Flax and PyTorch""" )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
            self.assert_almost_equals(fx_output , pt_output_loaded.numpy() , 4E-2 )
    def check_equivalence_pt_to_flax( self , vision_config , text_config , inputs_dict ):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , fx_model )
        fx_model.params = fx_state
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def check_equivalence_flax_to_pt( self , vision_config , text_config , inputs_dict ):
        """simple docstring"""
        config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config , text_config )
        pt_model = VisionTextDualEncoderModel(config )
        fx_model = FlaxVisionTextDualEncoderModel(config )
        pt_model = load_flax_weights_in_pytorch_model(pt_model , fx_model.params )
        self.check_pt_flax_equivalence(pt_model , fx_model , inputs_dict )
    def test_model_from_pretrained_configs( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**inputs_dict )
    def test_vision_text_dual_encoder_from_pretrained( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict )
    def test_save_load( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_save_load(**inputs_dict )
    def test_vision_text_output_attention( self ):
        """simple docstring"""
        inputs_dict = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**inputs_dict )
@is_pt_flax_cross_test
    def test_pt_flax_equivalence( self ):
        """simple docstring"""
        config_inputs_dict = self.prepare_config_and_inputs()
        vision_config = config_inputs_dict.pop("""vision_config""" )
        text_config = config_inputs_dict.pop("""text_config""" )
        inputs_dict = config_inputs_dict
        self.check_equivalence_pt_to_flax(vision_config , text_config , inputs_dict )
        self.check_equivalence_flax_to_pt(vision_config , text_config , inputs_dict )
@slow
    def test_pretrained_model_sanity( self ):
        """simple docstring"""
        model_a , inputs = self.get_pretrained_model_and_inputs()
        outputs = model_a(**inputs )
        out_2 = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(tmp_dirname )
            model_b = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
            after_outputs = model_b(**inputs )
            out_1 = after_outputs[0]
            max_diff = np.amax(np.abs(out_1 - out_2 ) )
            self.assertLessEqual(max_diff , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
    def get_pretrained_model_and_inputs( self ):
        """simple docstring"""
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        """simple docstring"""
        vision_model = FlaxViTModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        vit_model_tester = FlaxViTModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
    def get_pretrained_model_and_inputs( self ):
        """simple docstring"""
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            """hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=True , text_from_pt=True , )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        input_ids = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
        attention_mask = random_attention_mask([batch_size, 4] )
        inputs = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
        return model, inputs
    def get_vision_text_model( self , vision_config , text_config ):
        """simple docstring"""
        vision_model = FlaxCLIPVisionModel(vision_config )
        text_model = FlaxBertModel(text_config )
        return vision_model, text_model
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        clip_model_tester = FlaxCLIPVisionModelTester(self )
        bert_model_tester = FlaxBertModelTester(self )
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config , pixel_values = vision_config_and_inputs
        text_config , input_ids , token_type_ids , attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
        model = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        inputs = processor(
            text=["""una foto di un gatto""", """una foto di un cane"""] , images=image , padding=True , return_tensors="""np""" )
        outputs = model(**inputs )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        expected_logits = np.array([[1.2284727, 0.3104122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 ) )
| 306
| 0
|
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
    model_type = "masked_bert"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 364
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
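# Minimal usage sketch (illustrative, not part of the original module; assumes
# an already-constructed diffusers pipeline object `pipe`): a scheduler from
# this package is typically swapped onto a pipeline via its config, e.g.
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)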
| 306
| 0
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
_A : Optional[str] = field(
default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
_A : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
_A : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
_A : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
_A : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
_A : Optional[float] = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
_A : Optional[int] = field(
default=10000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    _A : Optional[float] = field(default=2e-4 , metadata={'''help''': '''Learning rate for training.'''} )
    _A : Optional[str] = field(default='''cosine''' , metadata={'''help''': '''Learning rate scheduler type.'''} )
_A : Optional[int] = field(
default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
_A : Optional[int] = field(
default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
_A : Optional[bool] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
_A : Optional[int] = field(default=50000 , metadata={'''help''': '''Maximum number of training steps.'''} )
_A : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
_A : Optional[int] = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
_A : Optional[int] = field(default=1 , metadata={'''help''': '''Training seed.'''} )
_A : Optional[int] = field(
default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
_A : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
_A : Optional[bool] = field(default=__lowerCAmelCase , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
_A : Optional[str] = field(
default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
_A : Optional[int] = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
_A : Optional[int] = field(
default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
_A : Optional[int] = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
_A : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
_A : Optional[int] = field(default=__lowerCAmelCase , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
_A : Optional[int] = field(
default=__lowerCAmelCase , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
_A : Optional[bool] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
_A : Optional[float] = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
_A : Optional[int] = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
_A : Optional[int] = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
_A : Optional[float] = field(default=0.9_5 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
_A : Optional[int] = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
_A : Optional[int] = field(
default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
_A : Optional[int] = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
    _A : Optional[str] = field(
        default='''eval_results.json''' , metadata={'''help''': '''File in which to save the evaluation results.'''} )
_A : Optional[str] = field(
default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
_A : Optional[int] = field(
default=-1 , metadata={
'''help''': (
'''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
''' number corresponds to which GPU device id to run on.'''
)
} , )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : Optional[int] = field(
default=__lowerCAmelCase , metadata={
'''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
} , )
_A : Optional[str] = field(
default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
_A : Optional[str] = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save the processed dataset.'''} )
_A : Optional[int] = field(
default=100000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
_A : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
_A : Optional[float] = field(
default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
_A : Optional[float] = field(
default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
_A : Optional[float] = field(
default=0.2_5 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
_A : Optional[float] = field(
default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
_A : Optional[float] = field(
default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
_A : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
_A : Optional[bool] = field(
default=__lowerCAmelCase , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
_A : Optional[float] = field(
default=0.8_5 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : Optional[str] = field(
default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
_A : Optional[str] = field(
default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
_A : Optional[str] = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
_A : Optional[int] = field(default=200000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
    _A : Optional[int] = field(
        default=32768 , metadata={'''help''': '''Vocabulary size of the new tokenizer.'''} )
_A : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
_A : Optional[bool] = field(default=__lowerCAmelCase , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
_A : Optional[str] = field(
default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
_A : Optional[str] = field(
default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
_A : Optional[int] = field(default=__lowerCAmelCase , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class lowerCAmelCase :
'''simple docstring'''
_A : Optional[str] = field(
default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
_A : Optional[str] = field(
default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
_A : Optional[str] = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
_A : Optional[bool] = field(default=__lowerCAmelCase , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
| 365
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash ( tokens : List[str] ):
    if len(tokens ) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM )
    for token in set(tokens ):
        min_hash.update(token.encode() )
    return min_hash
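# Illustrative sketch (toy inputs, not from the original source): a MinHash is
# only built once a file has at least MIN_NUM_TOKENS tokens, e.g.
#   mh = get_min_hash([f"tok_{i}" for i in range(MIN_NUM_TOKENS)])
#   assert mh is not None and get_min_hash(["too", "short"]) is None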
def get_tokens ( code : str ):
    return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class lowerCAmelCase :
'''simple docstring'''
    def __init__( self , *,
        duplication_jaccard_threshold : float = 0.85 , ):
        """simple docstring"""
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
        self._duplicate_clusters = defaultdict(set )
    def add ( self , code_key : Tuple , min_hash : MinHash ) -> None:
        """simple docstring"""
        close_duplicates = self._index.query(min_hash )
        if code_key in self._index.keys:
            print(F"Duplicate key {code_key}" )
            return
        self._index.insert(code_key , min_hash )
        if len(close_duplicates ) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key )
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key )
    def get_duplicate_clusters ( self ) -> List[List[Dict]]:
        """simple docstring"""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates )
            # reformat the cluster to be a list of dict
            cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
            duplicate_clusters.append(cluster )
        return duplicate_clusters
    def save ( self , filepath ) -> None:
        """simple docstring"""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath , """w""" ) as f:
            json.dump(duplicate_clusters , f )
def _compute_min_hash ( element ):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter ( dataset_iterator ):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
            if data is not None:
                yield data
def make_duplicate_clusters ( dataset_iterator : Type[Dataset] , jaccard_threshold : float ):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
        di.add(filename , min_hash )
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity ( code_a : str , code_b : str ):
    tokens_a = get_tokens(code_a )
    tokens_b = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
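# Illustrative (hypothetical inputs): get_tokens("def foo(a)") == {"def", "foo", "a"},
# so jaccard_similarity("def foo(a)", "def bar(a)") == 2 / 4 == 0.5 -- two shared
# tokens out of four distinct tokens overall.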
_shared_dataset = None
def _find_cluster_extremes_shared ( cluster , jaccard_threshold ):
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["""base_index"""]]["""content"""]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["""base_index"""]]["""content"""]
            if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["""copies"""] = 1
            extremes.append(element_a )
    return extremes
def find_extremes ( cluster_list , dataset , jaccard_threshold ):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f , cluster_list , ) , total=len(cluster_list ) , ):
            extremes_list.append(extremes )
    return extremes_list
def deduplicate_dataset ( dataset : Type[Dataset] , jaccard_threshold : float = 0.85 ):
    duplicate_clusters = make_duplicate_clusters(dataset , jaccard_threshold )
    duplicate_indices = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters , dataset , jaccard_threshold )
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["""base_index"""]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys() )
    ds_filter = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["""is_extreme"""] = element["""base_index"""] in extreme_dict
            if element["is_extreme"]:
                element["""copies"""] = extreme_dict[element["""base_index"""]]["""copies"""]
    print(F"Original dataset size: {len(dataset )}" )
    print(F"Number of duplicate clusters: {len(duplicate_clusters )}" )
    print(F"Files in duplicate cluster: {len(duplicate_indices )}" )
    print(F"Unique files in duplicate cluster: {len(extreme_dict )}" )
    print(F"Filtered dataset size: {len(ds_filter )}" )
    return ds_filter, duplicate_clusters
| 306
| 0
|
def solution ( n : int = 1000 ):
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
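# Sanity check (illustrative, not from the original source): for n == 12 the
# only Pythagorean triple with a + b + c == 12 is (3, 4, 5), so
# solution(12) returns 3 * 4 * 5 == 60.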
if __name__ == "__main__":
print(f'''{solution() = }''')
| 366
|
from ...processing_utils import ProcessorMixin
class lowerCAmelCase ( ProcessorMixin ):
    '''simple docstring'''
    attributes = ['''image_processor''', '''feature_extractor''']
    image_processor_class = '''TvltImageProcessor'''
    feature_extractor_class = '''TvltFeatureExtractor'''
    def __init__( self , image_processor , feature_extractor ):
        """simple docstring"""
        super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor
    def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs ):
        """simple docstring"""
        if images is None and audio is None:
            raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict )
        if images is not None:
            output_dict.update(images_dict )
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict )
        return output_dict
    @property
    def model_input_names( self ):
        """simple docstring"""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
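# Minimal usage sketch for the processor above (hypothetical checkpoint name and
# input arrays; extra kwargs are forwarded to the underlying image processor and
# feature extractor):
#   processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
#   inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)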
| 306
| 0
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Optional[int] , __a : Optional[Any]=13 , __a : Any=7 , __a : Optional[Any]=True , __a : Dict=True , __a : List[Any]=True , __a : Any=True , __a : int=99 , __a : Union[str, Any]=32 , __a : Union[str, Any]=5 , __a : int=4 , __a : str=4 , __a : Any="gelu" , __a : Union[str, Any]=0.0 , __a : Tuple=0.1 , __a : Optional[Any]=True , __a : Union[str, Any]=512 , __a : Tuple=16 , __a : List[Any]=2 , __a : List[Any]=0.02 , __a : Union[str, Any]=3 , __a : List[str]=4 , __a : Union[str, Any]=None , ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = parent
__lowercase : List[Any] = batch_size
__lowercase : List[Any] = seq_length
__lowercase : Optional[Any] = is_training
__lowercase : Optional[Any] = use_input_mask
__lowercase : Dict = use_token_type_ids
__lowercase : List[str] = use_labels
__lowercase : Dict = vocab_size
__lowercase : Union[str, Any] = hidden_size
__lowercase : Any = num_hidden_layers
__lowercase : Union[str, Any] = num_attention_heads
__lowercase : Optional[Any] = intermediate_multiple_size
__lowercase : List[str] = hidden_act
__lowercase : Dict = hidden_dropout
__lowercase : Union[str, Any] = attention_dropout
__lowercase : Any = weight_tying
__lowercase : Optional[Any] = max_position_embeddings
__lowercase : Optional[Any] = type_vocab_size
__lowercase : Tuple = type_sequence_label_size
__lowercase : Optional[Any] = initializer_range
__lowercase : str = num_labels
__lowercase : Optional[Any] = num_choices
__lowercase : Tuple = scope
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Any = None
if self.use_labels:
__lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = self.get_config()
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
__lowercase : List[str] = True
return config, input_ids, input_mask, token_labels
def lowerCAmelCase ( self : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = GPTNeoXJapaneseModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__lowercase : Any = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
__lowercase : str = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Tuple , __a : Optional[Any] , __a : Union[str, Any] , __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Any = True
__lowercase : Optional[Any] = GPTNeoXJapaneseModel(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__lowercase : List[str] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Dict , __a : Union[str, Any] , __a : Dict , __a : Optional[Any] , __a : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
__lowercase : Optional[int] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = True
__lowercase : List[str] = GPTNeoXJapaneseForCausalLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# first forward pass
__lowercase : List[Any] = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
__lowercase : Union[str, Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__lowercase : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__lowercase : Any = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
__lowercase : Dict = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
__lowercase : Dict = output_from_no_past['''hidden_states'''][0]
__lowercase : Dict = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , )['''hidden_states'''][0]
# select random slice
__lowercase : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
__lowercase : Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ) -> Any:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_A : Dict = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_A : Tuple = (
{'''feature-extraction''': GPTNeoXJapaneseModel, '''text-generation''': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_A : Optional[int] = False
_A : str = False
_A : Dict = False
_A : Optional[Any] = False
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = GPTNeoXJapaneseModelTester(self )
__lowercase : List[Any] = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
__lowercase : Tuple = None
self.model_tester.create_and_check_model_as_decoder(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*UpperCamelCase__ )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
__lowercase : List[Any] = '''abeja/gpt-neox-japanese-2.7b'''
__lowercase : str = ['''データサイエンティストとは、''', '''100年後に必要とされる会社は、''', '''フルリモートの環境で働くために必要なことは、''', '''国境の長いトンネルを抜けると''', '''美味しい日本食といえば、''']
__lowercase : List[str] = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
__lowercase : Any = GPTNeoXJapaneseTokenizer.from_pretrained(UpperCamelCase__ )
__lowercase : Union[str, Any] = GPTNeoXJapaneseForCausalLM.from_pretrained(UpperCamelCase__ )
__lowercase : Union[str, Any] = []
for prompt in prompts:
__lowercase : int = tokenizer(UpperCamelCase__ , return_tensors="""pt""" ).input_ids
__lowercase : List[str] = model.generate(UpperCamelCase__ , max_length=50 )
__lowercase : int = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
predicted_outputs += generated_string
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
| 367
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = parent
__lowercase : int = batch_size
__lowercase : Any = seq_length
__lowercase : str = is_training
__lowercase : str = use_input_mask
__lowercase : Optional[int] = use_token_type_ids
__lowercase : List[Any] = use_labels
__lowercase : Optional[Any] = vocab_size
__lowercase : int = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Dict = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : List[str] = max_position_embeddings
__lowercase : Union[str, Any] = type_vocab_size
__lowercase : Dict = type_sequence_label_size
__lowercase : Union[str, Any] = initializer_range
__lowercase : List[Any] = num_labels
__lowercase : str = num_choices
__lowercase : Tuple = scope
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : str = None
__lowercase : Optional[Any] = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
__lowercase : str = model(__a , attention_mask=__a )
__lowercase : List[Any] = model(__a )
__lowercase : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
__lowercase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Any = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ) -> Dict:
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = False
_A : Any = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Optional[Any] = ()
_A : List[Any] = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Optional[Any] = True
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = EsmModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase : Union[str, Any] = type
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[str] = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : List[str] = EsmEmbeddings(config=__a )
__lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
__lowercase : int = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : Optional[Any] = EsmEmbeddings(config=__a )
__lowercase : Optional[int] = torch.empty(2 , 4 , 30 )
__lowercase : Tuple = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
__lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
@require_torch
class lowerCAmelCase ( TestCasePlus ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
__lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase : List[str] = model(__a )[0]
__lowercase : Union[str, Any] = 33
__lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
__lowercase : List[Any] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
__lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__lowercase : Any = model(__a )[0]
# compare the actual values for a slice.
__lowercase : int = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 306
| 0
|
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')
prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
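# Optional, hedged extension (not part of the original script): for reproducible
# outputs a seeded generator can be passed to the pipeline (assumes a CUDA device):
#   generator = torch.Generator("cuda").manual_seed(0)
#   image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]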
| 368
|
def is_pentagonal ( n : int ) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
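# Illustrative: 22 is the 4th pentagonal number (4 * (3 * 4 - 1) // 2 == 22),
# so is_pentagonal(22) is True while is_pentagonal(23) is False.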
def snake_case_ ( lowerCAmelCase_ : int = 5000 ):
__lowercase : Optional[int] = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCAmelCase_ )]
for i, pentagonal_i in enumerate(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ , len(lowerCAmelCase_ ) ):
__lowercase : int = pentagonal_nums[j]
__lowercase : Optional[int] = pentagonal_i + pentagonal_j
__lowercase : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(lowerCAmelCase_ ) and is_pentagonal(lowerCAmelCase_ ):
return b
return -1
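# A quick sanity check (illustrative, not from the original file): P(4) = 22 and
# P(7) = 70, so is_pentagonal(22) and is_pentagonal(70) both return True, while
# is_pentagonal(23) returns False.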
if __name__ == "__main__":
print(f'''{solution() = }''')
| 306
| 0
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 369
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 306
| 0
|
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
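# How this dummy works: DummyObject makes attribute access on the class route through
# requires_backends, which raises an ImportError explaining that `keras_nlp` must be
# installed before the real object can be used.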
| 370
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
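# A minimal sketch of the expected call (the buffer names match the ones allocated
# further below; `batch` comes from the eval dataloader):
# outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)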
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)


metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
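# For reference, the squad/squad_v2 metrics expect exactly the two lists built above;
# a sketch with illustrative values:
# metric.compute(
#     predictions=[{"id": "0", "prediction_text": "Denver Broncos"}],
#     references=[{"id": "0", "answers": {"text": ["Denver Broncos"], "answer_start": [177]}}],
# )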
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers (start and end logits).
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 306
| 0
|
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def snake_case_ ( lowerCAmelCase_ : Tuple ):
__lowercase : int = np.max(a__ , axis=-1 , keepdims=a__ )
__lowercase : List[Any] = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=a__ )
class lowerCAmelCase ( _a ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] , **__a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = {}
if "second_text" in kwargs:
__lowercase : Optional[int] = kwargs["""second_text"""]
return preprocess_kwargs, {}, {}
def lowerCAmelCase ( self : List[str] , __a : Dict , __a : Any=None ) -> List[str]:
"""simple docstring"""
return self.tokenizer(__lowerCAmelCase , text_pair=__lowerCAmelCase , return_tensors=self.framework )
def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.model(**__lowerCAmelCase )
def lowerCAmelCase ( self : str , __a : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = model_outputs.logits[0].numpy()
__lowercase : Any = softmax(__lowerCAmelCase )
__lowercase : Dict = np.argmax(__lowerCAmelCase )
__lowercase : int = self.model.config.idalabel[best_class]
__lowercase : Any = probabilities[best_class].item()
__lowercase : str = logits.tolist()
return {"label": label, "score": score, "logits": logits}
| 371
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class NllbMoeConfig(PretrainedConfig):
    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True,
        activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0,
        init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32",
        router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4,
        decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all",
        normalize_router_prob_before_dropping=False, batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0,
        eos_token_id=2, output_router_logits=False, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 306
| 0
|
"""simple docstring"""
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] ):
return "\n".join(
F"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 350
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 306
| 0
|
import os
def solution():
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 351
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of n in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
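# For example (illustrative): prime_factors(12) returns [2, 2, 3] and
# prime_factors(97) returns [97], since 97 is prime.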
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_efficientnet''': [
'''EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientNetConfig''',
'''EfficientNetOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
'''EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientNetForImageClassification''',
'''EfficientNetModel''',
'''EfficientNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 352
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 306
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 353
|
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by capitalizing some of its
    lowercase letters and deleting all remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
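# Worked example (illustrative): abbr("daBcd", "ABC") is True, because "daBcd" ->
# capitalize 'a' and 'c' -> "dABCd" -> delete the lowercase 'd's -> "ABC".
# abbr("dBcd", "ABC") is False, since no 'a'/'A' is available for the leading 'A'.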
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 0
|
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Solve the multi-process interleaved print problem by locking this file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
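# This diagnostic is typically launched with one process per GPU, e.g. (illustrative):
# python -m torch.distributed.run --nproc_per_node 2 torch-distributed-gpu-test.py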
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(f'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(f'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(f'''{gpu} is broken''')
raise
| 354
|
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 306
| 0
|
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Return a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
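# For example (illustrative): get_device_map(n_layers=4, devices=[0, 1]) returns
# {0: [0, 1], 1: [2, 3]}, i.e. the first two layers on GPU 0 and the last two on GPU 1.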
| 355
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt substring search: return True if `pattern` occurs in `text`."""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """For each prefix of `pattern`, record the length of the longest proper prefix
    that is also a suffix (the KMP failure function)."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
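# Complexity note: building the failure array is O(len(pattern)) and the scan is
# O(len(text)), so the whole search runs in O(n + m) time, versus O(n * m) for the
# naive substring search.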
| 306
| 0
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 356
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = ['''pixel_values''']
def __init__( self : Any , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : str , ) -> None:
"""simple docstring"""
super().__init__(**__a )
__lowercase : Dict = size if size is not None else {"""shortest_edge""": 224}
__lowercase : Union[str, Any] = get_size_dict(__a , default_to_square=__a )
__lowercase : int = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__lowercase : Any = get_size_dict(__a , default_to_square=__a , param_name="""crop_size""" )
__lowercase : Optional[int] = do_resize
__lowercase : Union[str, Any] = size
__lowercase : List[Any] = resample
__lowercase : Any = do_center_crop
__lowercase : Dict = crop_size
__lowercase : int = do_rescale
__lowercase : Tuple = rescale_factor
__lowercase : List[Any] = do_normalize
__lowercase : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__lowercase : int = image_std if image_std is not None else OPENAI_CLIP_STD
__lowercase : Union[str, Any] = do_convert_rgb
def lowerCAmelCase ( self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[Any] , ) -> np.ndarray:
"""simple docstring"""
__lowercase : Dict = get_size_dict(__a , default_to_square=__a )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
__lowercase : str = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a )
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Any , ) -> np.ndarray:
"""simple docstring"""
__lowercase : Tuple = get_size_dict(__a )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a )
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ) -> List[str]:
"""simple docstring"""
return rescale(__a , scale=__a , data_format=__a , **__a )
def lowerCAmelCase ( self : Optional[int] , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : List[str] , ) -> np.ndarray:
"""simple docstring"""
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def lowerCAmelCase ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize
__lowercase : Dict = size if size is not None else self.size
__lowercase : Tuple = get_size_dict(__a , param_name="""size""" , default_to_square=__a )
__lowercase : int = resample if resample is not None else self.resample
__lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size
__lowercase : List[str] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a )
__lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : Tuple = image_mean if image_mean is not None else self.image_mean
__lowercase : str = image_std if image_std is not None else self.image_std
__lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase : Union[str, Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase : Union[str, Any] = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
__lowercase : Any = [to_numpy_array(__a ) for image in images]
if do_resize:
__lowercase : str = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
__lowercase : str = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
__lowercase : Dict = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
__lowercase : Optional[Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
__lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images]
__lowercase : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=__a , tensor_type=__a )
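# A standalone sketch of the rescale -> normalize arithmetic used in `preprocess`
# above, on a synthetic image (illustrative only; this is not the class's own API):
import numpy as np

def _toy_rescale_normalize(image, scale=1 / 255, mean=0.5, std=0.5):
    # rescale pixel values to [0, 1], then shift/scale the channel statistics
    return (image.astype(np.float32) * scale - mean) / std

_example = np.full((224, 224, 3), 127.5)
assert np.allclose(_toy_rescale_normalize(_example), 0.0, atol=1e-2)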
| 306
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase : List[Any] = logging.get_logger(__name__)
def get_yolos_config ( yolos_name : str ):
    config = YolosConfig()
    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]
    config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def read_in_q_k_v ( state_dict : dict , config : YolosConfig , base_model : bool = False ):
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[F"encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[F"encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
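# Quick standalone check of the fused-qkv split pattern above: the three
# hidden_size-row blocks stack back into the fused matrix (sketch with toy sizes):
import torch as _torch
_hidden = 4
_qkv = _torch.arange(3 * _hidden * _hidden, dtype=_torch.float32).reshape(3 * _hidden, _hidden)
_q, _k, _v = _qkv[:_hidden, :], _qkv[_hidden : 2 * _hidden, :], _qkv[-_hidden:, :]
assert _torch.equal(_torch.cat([_q, _k, _v], dim=0), _qkv)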
def rename_key ( name : str ):
    if "backbone" in name:
        name = name.replace("""backbone""" , """vit""" )
    if "cls_token" in name:
        name = name.replace("""cls_token""" , """embeddings.cls_token""" )
    if "det_token" in name:
        name = name.replace("""det_token""" , """embeddings.detection_tokens""" )
    if "mid_pos_embed" in name:
        name = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" )
    if "pos_embed" in name:
        name = name.replace("""pos_embed""" , """embeddings.position_embeddings""" )
    if "patch_embed.proj" in name:
        name = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
    if "blocks" in name:
        name = name.replace("""blocks""" , """encoder.layer""" )
    if "attn.proj" in name:
        name = name.replace("""attn.proj""" , """attention.output.dense""" )
    if "attn" in name:
        name = name.replace("""attn""" , """attention.self""" )
    if "norm1" in name:
        name = name.replace("""norm1""" , """layernorm_before""" )
    if "norm2" in name:
        name = name.replace("""norm2""" , """layernorm_after""" )
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""" , """intermediate.dense""" )
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""" , """output.dense""" )
    if "class_embed" in name:
        name = name.replace("""class_embed""" , """class_labels_classifier""" )
    if "bbox_embed" in name:
        name = name.replace("""bbox_embed""" , """bbox_predictor""" )
    if "vit.norm" in name:
        name = name.replace("""vit.norm""" , """vit.layernorm""" )
    return name
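# Sanity check of the renaming rules above (relies on rename_key as defined):
assert rename_key("""backbone.blocks.0.attn.proj.weight""" ) == """vit.encoder.layer.0.attention.output.dense.weight"""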
def convert_state_dict ( orig_state_dict : dict , model : YolosForObjectDetection ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "qkv" in key:
            key_split = key.split(""".""" )
            layer_num = int(key_split[2] )
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key )] = val
    return orig_state_dict
def prepare_img ( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_yolos_checkpoint ( yolos_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False ):
    config = get_yolos_config(yolos_name )
    # load original state_dict
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )['model']
    # load 🤗 model
    model = YolosForObjectDetection(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != 'yolos_ti' else 512
    image_processor = YolosImageProcessor(format="""coco_detection""" , size=size )
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    outputs = model(**encoding )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
        expected_slice_boxes = torch.tensor(
            [[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
        expected_slice_boxes = torch.tensor(
            [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
        expected_slice_boxes = torch.tensor(
            [[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
        expected_slice_boxes = torch.tensor(
            [[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
        expected_slice_boxes = torch.tensor(
            [[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
    assert torch.allclose(logits[0, :3, :3] , expected_slice_logits , atol=1e-4 )
    assert torch.allclose(pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
        model_mapping = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print("""Pushing to the hub...""" )
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name , organization="""hustvl""" )
        model.push_to_hub(model_name , organization="""hustvl""" )
if __name__ == "__main__":
lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase : str = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
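# Example invocation (script and file paths are illustrative):
#   python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#       --checkpoint_path ./yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small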
| 357
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace ( s : str , old : str , new : str , occurrence : int ):
    parts = s.rsplit(old , occurrence )
    return new.join(parts )
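# rreplace only rewrites the last `occurrence` matches, e.g.:
assert rreplace("""encoder.blocks.0.w""" , """.w""" , """.weight""" , 1 ) == """encoder.blocks.0.weight"""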
def count_parameters ( state_dict : dict ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def upgrade_state_dict ( state_dict : dict ):
    upgrade = {}
    group_keys = ["""group_1""", """group_2""", """group_3""", """group_4"""]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F"{group_key}." , F"{group_key}.group." )
        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )
        if key.endswith(""".w""" ):
            key = rreplace(key , """.w""" , """.weight""" , 1 )
        if key.endswith(""".b""" ):
            key = rreplace(key , """.b""" , """.bias""" , 1 )
        upgrade[key] = value.float()
    return upgrade
@torch.no_grad()
def convert_dalle_checkpoint ( checkpoint_path : str , pytorch_dump_folder_path : str , config_path : str = None , save_checkpoint : bool = True ):
    from dall_e import Encoder
    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
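# Example invocation of the CLI above (script and file names are hypothetical):
#   python convert_dalle_to_flava_codebook.py --checkpoint_path ./dalle_encoder.ckpt \
#       --pytorch_dump_folder_path ./flava-image-codebook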
| 306
| 0
|
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one ( i ):  # picklable for multiprocessing
    return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input ( ):
    with parallel_backend("""spark""" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("""unsupported backend""" ):
            map_nested(add_one , lst , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" , [2, -1] )
def test_parallel_backend_map_nested ( num_proc ):
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
    with parallel_backend("""spark""" ):
        assert map_nested(add_one , s1 , num_proc=num_proc ) == expected_map_nested_s1
        assert map_nested(add_one , s2 , num_proc=num_proc ) == expected_map_nested_s2
        assert map_nested(add_one , s3 , num_proc=num_proc ) == expected_map_nested_s3
        assert map_nested(add_one , s4 , num_proc=num_proc ) == expected_map_nested_s4
        assert map_nested(add_one , s5 , num_proc=num_proc ) == expected_map_nested_s5
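# map_nested applies the function leaf-wise and keeps the container structure,
# as the cases above show; a minimal sequential check (no joblib backend needed):
assert map_nested(add_one , {'a': [1, 2]} ) == {'a': [2, 3]}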
| 358
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch ( prophetnet_checkpoint_path : str , pytorch_dump_folder_path : str ):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path )
        prophet , loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path , output_loading_info=True )
    special_keys = ["""key_proj""", """value_proj""", """query_proj"""]
    mapping = {
        """self_attn""": """ngram_self_attn""",
        """cross_attn""": """encoder_attn""",
        """cross_attn_layer_norm""": """encoder_attn_layer_norm""",
        """feed_forward_layer_norm""": """final_layer_norm""",
        """feed_forward""": """""",
        """intermediate""": """fc1""",
        """output""": """fc2""",
        """key_proj""": """k_proj""",
        """query_proj""": """q_proj""",
        """value_proj""": """v_proj""",
        """word_embeddings""": """embed_tokens""",
        """embeddings_layer_norm""": """emb_layer_norm""",
        """relative_pos_embeddings""": """relative_linear""",
        """ngram_embeddings""": """ngram_input_embed""",
        """position_embeddings""": """embed_positions""",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(""".""" )
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model , old_attribute ) and len(old_attribute ) > 0:
                old_attribute = attribute
            elif hasattr(old_model , attribute ):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F"{attribute} is initialized." )
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F"{attribute} is initialized." )
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model , """in_proj_weight""" ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model , attribute )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :] )
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute )]
                old_model = old_model[int(attribute )]
            else:
                model = getattr(model , attribute )
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model , old_attribute ):
                        raise ValueError(F"{old_model} does not have {old_attribute}" )
                    old_model = getattr(old_model , old_attribute )
        if not is_key_init:
            raise ValueError(F"{key} was not correctly initialized!" )
    print(F"Saving model to {pytorch_dump_folder_path}" )
    prophet.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase : Any = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
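# Example invocation (script and checkpoint names are illustrative):
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old --pytorch_dump_folder_path ./prophetnet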
| 306
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = CLIPTokenizer
_A : int = CLIPTokenizerFast
_A : str = True
_A : Dict = {}
_A : Any = False
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
super().setUp()
# fmt: off
__lowercase : str = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__lowercase : List[str] = dict(zip(A__ , range(len(A__ ) ) ) )
__lowercase : Tuple = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
__lowercase : str = {"""unk_token""": """<unk>"""}
__lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__lowercase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A__ ) )
def lowerCAmelCase ( self : Any , **__a : Any ) -> int:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A__ )
def lowerCAmelCase ( self : Any , **__a : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A__ )
def lowerCAmelCase ( self : List[Any] , __a : Dict ) -> int:
"""simple docstring"""
__lowercase : Optional[int] = """lower newer"""
__lowercase : List[Any] = """lower newer"""
return input_text, output_text
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowercase : Tuple = """lower newer"""
__lowercase : Optional[int] = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
__lowercase : Any = tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
__lowercase : str = tokens + [tokenizer.unk_token]
__lowercase : str = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
@require_ftfy
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : List[Any] = self.tokenizer_class.from_pretrained(A__ , **A__ )
__lowercase : Optional[int] = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
__lowercase : str = """A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d."""
__lowercase : Dict = tokenizer_s.tokenize(A__ )
__lowercase : Tuple = tokenizer_r.tokenize(A__ )
self.assertListEqual(A__ , A__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__lowercase : Union[str, Any] = """xa\u0303y""" + """ """ + """x\xe3y"""
__lowercase : Union[str, Any] = tokenizer_s.tokenize(A__ )
__lowercase : int = tokenizer_r.tokenize(A__ )
self.assertListEqual(A__ , A__ )
# Test that the tokenization is identical on unicode of space type
__lowercase : Tuple = [
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
__lowercase : Optional[Any] = tokenizer_s.tokenize(A__ )
__lowercase : List[str] = tokenizer_r.tokenize(A__ )
self.assertListEqual(A__ , A__ )
# Test that the tokenization is identical on unicode of line break type
__lowercase : Union[str, Any] = [
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
__lowercase : Dict = tokenizer_s.tokenize(A__ )
__lowercase : Optional[int] = tokenizer_r.tokenize(A__ )
self.assertListEqual(A__ , A__ )
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowercase : Optional[int] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
__lowercase : str = F"{text_of_1_token} {text_of_1_token}"
__lowercase : List[str] = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , )
__lowercase : Dict = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A__ ) + 1, len(A__ ) + 1 + len(A__ )) , )
__lowercase : Tuple = F" {text}"
__lowercase : Dict = self.rust_tokenizer_class.from_pretrained(
A__ , use_fast=A__ , )
__lowercase : Any = tokenizer_r(A__ , return_offsets_mapping=A__ , add_special_tokens=A__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A__ ) + 1, 1 + len(A__ ) + 1 + len(A__ )) , )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
with self.assertRaises(A__ ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
| 359
|
def solution ( pence : int = 200 ):
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
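    # Worked example of the recurrence number_of_ways[i] += number_of_ways[i - coin]:
    # 5 pence splits 4 ways over {1, 2, 5}: 5; 2+2+1; 2+1+1+1; 1+1+1+1+1.
    assert solution(5) == 4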
| 306
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCamelCase : List[str] = get_tests_dir('''fixtures''')
lowerCamelCase : Union[str, Any] = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
lowerCamelCase : int = get_tests_dir('''fixtures/dummy-config.json''')
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : str = 0
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" )
self.assertIsInstance(a__ , a__ )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
            __lowercase : List[Any] = Wav2Vec2Config()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
__lowercase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(a__ ).to_dict()
config_dict.pop("""feature_extractor_type""" )
            __lowercase : List[str] = Wav2Vec2FeatureExtractor(**config_dict )
# save in new folder
model_config.save_pretrained(a__ )
config.save_pretrained(a__ )
__lowercase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(a__ )
# make sure private variable is not incorrectly saved
__lowercase : str = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(a__ , a__ )
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
a__ , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowercase : List[Any] = AutoFeatureExtractor.from_pretrained("""bert-base""" )
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(
a__ , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowercase : List[Any] = AutoFeatureExtractor.from_pretrained(a__ , revision="""aaaaaa""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
with self.assertRaisesRegex(
a__ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
__lowercase : Optional[int] = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
with self.assertRaises(a__ ):
__lowercase : Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a__ ):
__lowercase : Optional[int] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=a__ )
__lowercase : List[str] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a__ )
__lowercase : int = AutoFeatureExtractor.from_pretrained(a__ , trust_remote_code=a__ )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , a__ )
AutoFeatureExtractor.register(a__ , a__ )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a__ ):
AutoFeatureExtractor.register(a__ , a__ )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowercase : str = CustomFeatureExtractor.from_pretrained(a__ )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a__ )
__lowercase : int = AutoFeatureExtractor.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
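    # The registration flow exercised above, in outline (using the test's own
    # fixture classes): AutoConfig.register("custom", CustomConfig) ties the
    # "custom" model_type to CustomConfig, and AutoFeatureExtractor.register(
    # CustomConfig, CustomFeatureExtractor) lets from_pretrained resolve any
    # directory saved with that config to CustomFeatureExtractor.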
def lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[int] = True
try:
AutoConfig.register("""custom""" , a__ )
AutoFeatureExtractor.register(a__ , a__ )
# If remote code is not set, the default is to use local
__lowercase : str = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
__lowercase : Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
__lowercase : Optional[Any] = AutoFeatureExtractor.from_pretrained(
"""hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=a__ )
self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" )
self.assertTrue(not hasattr(a__ , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 360
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = parent
__lowercase : List[str] = out_indices if out_indices is not None else [4]
__lowercase : Optional[int] = stage_names
__lowercase : Any = out_features
__lowercase : Optional[Any] = backbone
__lowercase : Optional[Any] = batch_size
__lowercase : Union[str, Any] = image_size
__lowercase : List[str] = num_channels
__lowercase : str = use_pretrained_backbone
__lowercase : str = is_training
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : str = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TimmBackbone(config=__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Optional[Any] = model(__a )
self.parent.assertEqual(
            result.feature_maps[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase : str = config_and_inputs
__lowercase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
_A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
_A : List[Any] = False
_A : List[str] = False
_A : Any = False
_A : Optional[Any] = False
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = TimmBackboneModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = """resnet18"""
__lowercase : Optional[int] = """microsoft/resnet-18"""
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
__lowercase : Dict = AutoBackbone.from_pretrained(__a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
__lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
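    # Note on the out_indices comparison above: Python's -1 index and
    # len(stage_names) - 1 address the same (last) stage, which is why the two
    # defaults are equivalent even though they are spelled differently.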
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : List[str] = [*signature.parameters.keys()]
__lowercase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Optional[Any] = True
__lowercase : Union[str, Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowercase : Union[str, Any] = self.all_model_classes[0]
__lowercase : List[Any] = model_class(__a )
model.to(__a )
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
__lowercase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowercase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
__lowercase : int = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowercase : Any = copy.deepcopy(__a )
__lowercase : Dict = None
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowercase : List[str] = copy.deepcopy(__a )
__lowercase : Optional[Any] = False
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
__lowercase : List[Any] = model(**__a )
| 306
| 0
|
# Imports
import numpy as np
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , __a : Dict=None , __a : Union[str, Any]=None , __a : Union[str, Any]=None , __a : Optional[Any]=None , __a : Optional[int]=None ) -> Optional[Any]:
"""simple docstring"""
self.set_matricies(red=__A , green=__A , blue=__A , red_edge=__A , nir=__A )
def lowerCAmelCase ( self : Any , __a : Optional[int]=None , __a : Union[str, Any]=None , __a : List[Any]=None , __a : Any=None , __a : str=None ) -> Optional[Any]:
"""simple docstring"""
if red is not None:
__lowercase : Tuple = red
if green is not None:
__lowercase : List[str] = green
if blue is not None:
__lowercase : List[Any] = blue
if red_edge is not None:
__lowercase : int = red_edge
if nir is not None:
__lowercase : Optional[int] = nir
return True
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any]="" , __a : List[Any]=None , __a : List[Any]=None , __a : Optional[Any]=None , __a : int=None , __a : List[Any]=None ) -> str:
"""simple docstring"""
self.set_matricies(red=__A , green=__A , blue=__A , red_edge=__A , nir=__A )
__lowercase : List[Any] = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
return self.nir * (self.red / (self.green**2))
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - self.red) / (self.nir + self.red)
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
return (self.nir - self.blue) / (self.nir + self.blue)
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return (self.redEdge - self.red) / (self.redEdge + self.red)
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green)
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def lowerCAmelCase ( self : Tuple , __a : Tuple=0.08 , __a : Optional[int]=1.22 , __a : Any=0.03 ) -> Union[str, Any]:
"""simple docstring"""
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return (self.nir / self.green) - 1
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return (self.nir / self.redEdge) - 1
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
return (self.red - self.blue) / self.red
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : str = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
return self.nir - self.green
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def lowerCAmelCase ( self : Dict , __a : List[Any]=0.16 ) -> str:
"""simple docstring"""
return (self.nir - self.green) / (self.nir + self.green + y)
def lowerCAmelCase ( self : Tuple , __a : Dict=0.5 ) -> Tuple:
"""simple docstring"""
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def lowerCAmelCase ( self : List[str] , __a : Optional[int]=None , __a : Any=None ) -> Tuple:
"""simple docstring"""
return (self.nir - b) / (a * self.red)
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return (self.red + self.green + self.blue) / 30.5
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return self.nir / self.red
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
return (self.rvi() - 1) / (self.rvi() + 1)
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return self.green / (self.nir + self.red + self.green)
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
return self.nir / (self.nir + self.red + self.green)
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return self.red / (self.nir + self.red + self.green)
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return (self.green - self.red) / (self.green + self.red)
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
return (self.red - self.green) / (self.red + self.green)
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
__lowercase : Optional[Any] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
return self.nir / self.red
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
return (self.ndvi() + 0.5) ** (1 / 2)
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
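# NDVI on synthetic reflectance bands, mirroring the ndvi() formula above
# (standalone sketch; the band values are made up):
import numpy as _np
_red = _np.array([0.1, 0.2] )
_nir = _np.array([0.3, 0.6] )
assert _np.allclose((_nir - _red) / (_nir + _red) , [0.5, 0.5] )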
| 361
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : str = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCamelCase : Optional[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            F" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq checkpoint's weights into the transformers design."""
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
args = parser.parse_args()
convert_wav2vec2_conformer_checkpoint(
    args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
)
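
# A hypothetical invocation of the conversion script above; the paths are
# placeholders, while the flag names come from the argparse setup:
#
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#       --dict_path /path/to/dict.ltr.txt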
| 306
| 0
|
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")
            for axis in [ax.xaxis, ax.yaxis]:
                axis.set_major_formatter(ScalarFormatter())
        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )
                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")
                title_str += f" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()
        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()
if __name__ == "__main__":
main()
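
# A minimal sketch of the CSV layout the Plot class above expects, inferred
# from its DictReader field accesses ("model", "batch_size", "sequence_length",
# "result"); the row values below are made up.
import csv

with open("results.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerow({"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 128, "result": 1376})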
| 362
|
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
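
# A quick illustration of hamming_distance as defined above; the first pair of
# strings differs at exactly three positions.
assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("0000", "1111") == 4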
| 306
| 0
|
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
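
# Per the deprecation message above, new code should import the pipeline from
# the package root instead of this module:
#
#   from diffusers import StableDiffusionControlNetPipeline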
| 363
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
'''simple docstring'''
def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = np.abs((a - b) ).max()
self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = after_output[0]
__lowercase : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1E-3 )
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
__lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Union[str, Any] = model(
input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
__lowercase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
__lowercase : List[str] = to_atuple(vision_model.config.patch_size )
__lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase : Dict = output.text_model_output.attentions
self.assertEqual(len(__a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pt_model.to(__a )
pt_model.eval()
# prepare inputs
__lowercase : Union[str, Any] = inputs_dict
__lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
__lowercase : Tuple = fx_model(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__a )
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
__lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__a )
__lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
pt_model_loaded.to(__a )
pt_model_loaded.eval()
with torch.no_grad():
__lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = VisionTextDualEncoderModel(__a )
__lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
__lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
__lowercase : Any = fx_state
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
"""simple docstring"""
__lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
__lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__a )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**__a )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__a )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
__lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
__lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
__lowercase : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(__a , __a , __a )
self.check_equivalence_flax_to_pt(__a , __a , __a )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
__lowercase : Dict = model_a(**__a )
__lowercase : Any = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__a )
__lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Optional[int] = model_a(**__a )
__lowercase : Tuple = after_outputs[0]
__lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1E-5 )
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : int = 13
__lowercase : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : Tuple = random_attention_mask([batch_size, 4] )
__lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxViTModel(__a )
__lowercase : List[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = FlaxViTModelTester(self )
__lowercase : str = FlaxBertModelTester(self )
__lowercase : List[str] = vit_model_tester.prepare_config_and_inputs()
__lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Optional[int] = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : Tuple = 13
__lowercase : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : List[Any] = random_attention_mask([batch_size, 4] )
__lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = FlaxCLIPVisionModel(__a )
__lowercase : Optional[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = FlaxCLIPVisionModelTester(self )
__lowercase : Optional[Any] = FlaxBertModelTester(self )
__lowercase : Any = clip_model_tester.prepare_config_and_inputs()
__lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Dict = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
__lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase : Tuple = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , padding=__a , return_tensors="""np""" )
__lowercase : Optional[int] = model(**__a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
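
# The PyTorch <-> Flax round-trip pattern exercised by the tests above, in
# isolation; a sketch assuming any paired PyTorch/Flax model objects:
#
#   fx_model.params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
#   pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)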
| 306
| 0
|
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_A )
__lowercase : List[str] = self.get_dummy_inputs()
__lowercase : Optional[Any] = pipe(**_A ).images
__lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Any = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Any = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_A )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Any = self.get_dummy_inputs()
__lowercase : Any = pipe(**_A ).images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Any = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Any = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__lowercase : str = self.get_dummy_inputs()
__lowercase : str = pipe(**_A ).images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : int = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Union[str, Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__lowercase : str = self.get_dummy_inputs()
__lowercase : Any = pipe(**_A ).images
__lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : List[Any] = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : Any = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Optional[Any] = self.get_dummy_inputs()
__lowercase : Tuple = pipe(**_A ).images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : Any = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
__lowercase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Optional[Any] = self.get_dummy_inputs()
__lowercase : List[Any] = pipe(**_A ).images
__lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
__lowercase : int = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Tuple = self.get_dummy_inputs()
__lowercase : Optional[Any] = 3 * [inputs["""prompt"""]]
# forward
__lowercase : Union[str, Any] = pipe(**_A )
__lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
__lowercase : Union[str, Any] = self.get_dummy_inputs()
__lowercase : Optional[Any] = 3 * [inputs.pop("""prompt""" )]
__lowercase : str = pipe.tokenizer(
_A , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_A , return_tensors="""np""" , )
__lowercase : int = text_inputs["""input_ids"""]
__lowercase : Optional[int] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
__lowercase : List[Any] = prompt_embeds
# forward
__lowercase : int = pipe(**_A )
__lowercase : List[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Tuple = self.get_dummy_inputs()
__lowercase : Dict = 3 * ["""this is a negative prompt"""]
__lowercase : str = negative_prompt
__lowercase : Optional[int] = 3 * [inputs["""prompt"""]]
# forward
__lowercase : Any = pipe(**_A )
__lowercase : Union[str, Any] = output.images[0, -3:, -3:, -1]
__lowercase : str = self.get_dummy_inputs()
__lowercase : Union[str, Any] = 3 * [inputs.pop("""prompt""" )]
__lowercase : str = []
for p in [prompt, negative_prompt]:
__lowercase : int = pipe.tokenizer(
_A , padding="""max_length""" , max_length=pipe.tokenizer.model_max_length , truncation=_A , return_tensors="""np""" , )
__lowercase : Any = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
__lowercase , __lowercase : int = embeds
# forward
__lowercase : List[str] = pipe(**_A )
__lowercase : Optional[int] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : str = ort.SessionOptions()
__lowercase : List[str] = False
return options
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_A )
__lowercase : Union[str, Any] = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
__lowercase : List[str] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="""np""" )
__lowercase : Dict = output.images
__lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase : List[str] = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase : List[str] = DDIMScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_A , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_A )
__lowercase : Tuple = """open neural network exchange"""
__lowercase : List[str] = np.random.RandomState(0 )
__lowercase : Union[str, Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_A , output_type="""np""" )
__lowercase : Union[str, Any] = output.images
__lowercase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase : Optional[int] = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase : List[str] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" )
__lowercase : str = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_A , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_A )
__lowercase : Union[str, Any] = """open neural network exchange"""
__lowercase : Optional[Any] = np.random.RandomState(0 )
__lowercase : Optional[int] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_A , output_type="""np""" )
__lowercase : Tuple = output.images
__lowercase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase : str = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
__lowercase : List[Any] = 0
def test_callback_fn(__a : Dict , __a : Dict , __a : Dict ) -> None:
__lowercase : str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
__lowercase : List[str] = latents[0, -3:, -3:, -1]
__lowercase : Any = np.array(
[-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
__lowercase : Dict = latents[0, -3:, -3:, -1]
__lowercase : Union[str, Any] = np.array(
[-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
__lowercase : Dict = False
__lowercase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_A )
__lowercase : Any = """Andromeda galaxy in a bottle"""
__lowercase : str = np.random.RandomState(0 )
pipe(
prompt=_A , num_inference_steps=5 , guidance_scale=7.5 , generator=_A , callback=_A , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[int] = OnnxStableDiffusionPipeline.from_pretrained(
"""runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , safety_checker=_A , feature_extractor=_A , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_A , _A )
assert pipe.safety_checker is None
__lowercase : List[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_A )
__lowercase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(_A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__lowercase : Dict = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
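
# The core usage pattern from the fast tests above as a standalone sketch; the
# checkpoint name and prompt are taken from the test class, everything else is
# a minimal default.
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline", provider="CPUExecutionProvider"
)
image = pipe("A painting of a squirrel eating a burger", num_inference_steps=2, output_type="numpy").images[0]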
| 364
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
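
# A sketch of how these schedulers are typically swapped on a pipeline,
# mirroring the `from_config` pattern used in the ONNX tests earlier in this
# document (the `pipe` object is assumed to already exist):
#
#   from diffusers import DPMSolverMultistepScheduler
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)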
| 306
| 0
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Return the euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` with a k-nearest-neighbours majority vote."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
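
# A tiny worked example of the majority-vote step above: with k=5, the most
# common label among the five nearest neighbours wins (here, label 2).
from collections import Counter

votes = [0, 2, 2, 1, 2]
assert Counter(votes).most_common(1)[0][0] == 2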
| 365
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens):
    """Compute the MinHash of a code snippet's tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code):
    """Tokenize a code snippet."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the LSH index and record any close duplicates in a cluster."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset using MinHash + LSH."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of two code snippets' token sets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Find a set of "extreme" files in a cluster so every member is close to one of them."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list), total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
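
# A minimal sketch of the MinHash machinery used above: two similar token sets
# yield a high estimated Jaccard similarity. The tokens are made up;
# `MinHash.jaccard` is part of the datasketch API.
from datasketch import MinHash

m1, m2 = MinHash(num_perm=256), MinHash(num_perm=256)
for t in {"def", "foo", "return", "x"}:
    m1.update(t.encode())
for t in {"def", "foo", "return", "y"}:
    m2.update(t.encode())
print(m1.jaccard(m2))  # close to the true Jaccard similarity of 3/5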
| 306
| 0
|
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector: list[int], allocated_resources_table: list[list[int]], maximum_claim_table: list[list[int]]) -> None:
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self) -> list[int]:
        """Check for allocated resources in line with each resource in the claim vector."""
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self) -> list[int]:
        """Check for available resources: claim vector minus allocated resources."""
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self) -> list[list[int]]:
        """Implement the safety algorithm: maximum claim minus allocated resources."""
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self) -> dict[int, list[int]]:
        """Map each process index to its need vector."""
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs) -> None:
        """Simulate the Banker's algorithm over the given tables."""
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
        print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
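
# A hypothetical run of the class above using the sample tables defined at the
# top of this file; passing `describe=True` triggers the pretty-printer before
# the simulation starts.
#
#   BankersAlgorithm(
#       test_claim_vector, test_allocated_res_table, test_maximum_claim_table
#   ).main(describe=True)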
| 366
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    r"""
    Constructs a TVLT processor which wraps a TVLT image processor and a TVLT feature extractor into a single
    processor.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
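# A rough usage sketch (how the two sub-processors are loaded is elided and
# the variable names are assumptions): the processor simply merges the image
# and audio feature dicts into one dict of model-ready tensors.
#
#     processor = TvltProcessor(image_processor, feature_extractor)
#     inputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)
#     # `inputs` now contains both the pixel and the audio feature keys.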
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
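# Worked example for `inputs_to_logits_ratio` above: with the default
# `conv_stride` of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product is
# 5 * 2**6 = 320, i.e. roughly 320 raw input samples collapse into a single
# frame of encoder output:
#
#     >>> functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1)
#     320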
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_common_attributes(self):
        pass


@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
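# Concrete block tests are expected to mix this class into a unittest.TestCase
# and set `block_class` / `block_type`; a sketch (the block name is
# illustrative):
#
#     class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
#         block_class = DownBlock2D
#         block_type = "down"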
def is_pentagonal(n: int) -> bool:
    """A number is pentagonal if (1 + sqrt(1 + 24 * n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Return the smallest difference |P_j - P_i| for which both the sum and the
    difference of the pentagonal numbers P_i and P_j are themselves pentagonal."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
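# Sanity check (not part of the original solution): the first pentagonal
# numbers P_n = n(3n - 1)/2 are 1, 5, 12, 22, 35, while a non-pentagonal
# value such as 6 must be rejected.
assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
assert not is_pentagonal(6)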
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_gradient_accumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def test_gradient_accumulator_distribution_strategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
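# A single-device sketch of the accumulate-then-apply pattern exercised above
# (the variable and gradient values are made up; `create_optimizer` returns an
# (optimizer, lr_schedule) pair):
#
#     variable = tf.Variable([4.0, 3.0])
#     accumulator = GradientAccumulator()
#     for grads in ([tf.constant([1.0, 2.0])], [tf.constant([3.0, -1.0])]):
#         accumulator(grads)  # sums gradients across micro-batches
#     optimizer, _ = create_optimizer(5e-5, num_train_steps=10, num_warmup_steps=5)
#     optimizer.apply_gradients(zip(accumulator.gradients, [variable]))
#     accumulator.reset()  # start the next accumulation window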
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
import jax.numpy as jnp
from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
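# Small illustration of the shift (the input values are made up): labels move
# one slot to the right, the decoder start token is prepended, and any ignored
# (-100) positions become the pad token.
#
#     >>> shift_tokens_right(jnp.array([[5, 7, -100]]), pad_token_id=0, decoder_start_token_id=2)
#     Array([[2, 5, 7]], dtype=int32)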
class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
    help='''Path to the ONNX model.''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''Number of worker processes to use for dataset preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['''validation'''].column_names

question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['''validation''']
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
total_time += infer_time
niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 10_00 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 10_00))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Segment the region described by `text` with CLIPSeg to build the mask
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : str = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class lowerCAmelCase ( PretrainedConfig ):
'''simple docstring'''
model_type = '''nllb-moe'''
keys_to_ignore_at_inference = ['''past_key_values''']
attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , vocab_size=128112 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.05 , decoder_layerdrop=0.05 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=1024 , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , router_bias=False , router_dtype="float32" , router_ignore_padding_tokens=False , num_experts=128 , expert_capacity=64 , encoder_sparse_step=4 , decoder_sparse_step=4 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , second_expert_policy="all" , normalize_router_prob_before_dropping=False , batch_prioritized_routing=False , moe_eval_capacity_token_fraction=1.0 , moe_token_dropout=0.2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , output_router_logits=False , **kwargs , ):
"""simple docstring"""
self.vocab_size = vocab_size
self.max_position_embeddings = max_position_embeddings
self.d_model = d_model
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.activation_function = activation_function
self.init_std = init_std
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.use_cache = use_cache
self.num_hidden_layers = encoder_layers
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
self.router_z_loss_coef = router_z_loss_coef
self.router_aux_loss_coef = router_aux_loss_coef
self.decoder_sparse_step = decoder_sparse_step
self.encoder_sparse_step = encoder_sparse_step
self.num_experts = num_experts
self.expert_capacity = expert_capacity
self.router_bias = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}" )
self.router_dtype = router_dtype
self.router_ignore_padding_tokens = router_ignore_padding_tokens
self.batch_prioritized_routing = batch_prioritized_routing
self.second_expert_policy = second_expert_policy
self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
self.moe_token_dropout = moe_token_dropout
self.output_router_logits = output_router_logits
super().__init__(
pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
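# Sketch: a deliberately tiny configuration built from the class above (still
# under its local name here). Every value is illustrative, not a released
# checkpoint; the only asserted behaviour is that num_hidden_layers mirrors
# encoder_layers, per the __init__ above.
tiny_config = lowerCAmelCase(
vocab_size=256 , d_model=32 , encoder_layers=2 , decoder_layers=2 , encoder_ffn_dim=64 , decoder_ffn_dim=64 , num_experts=4 , expert_capacity=8 , )
assert tiny_config.num_hidden_layers == 2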
| 306
| 0
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCAmelCase :
'''simple docstring'''
feature_extraction_class = None
def test_feat_extract_to_json_string( self ):
"""simple docstring"""
feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
obj = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , value )
def test_feat_extract_to_json_file( self ):
"""simple docstring"""
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname , """feat_extract.json""" )
feat_extract_first.to_json_file(json_file_path )
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def test_feat_extract_from_and_save_pretrained( self ):
"""simple docstring"""
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
check_json_file_has_correct_format(saved_file )
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def test_init_without_params( self ):
"""simple docstring"""
feat_extract = self.feature_extraction_class()
self.assertIsNotNone(feat_extract )
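# The mixin above is abstract. A concrete sketch of the same JSON round-trip,
# assuming the transformers Wav2Vec2FeatureExtractor as one example of a class
# exposing to_json_file/from_json_file (any HF feature extractor works alike):
# from transformers import Wav2Vec2FeatureExtractor
# with tempfile.TemporaryDirectory() as tmpdirname:
#     path = os.path.join(tmpdirname, "feat_extract.json")
#     first = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000)
#     first.to_json_file(path)
#     second = Wav2Vec2FeatureExtractor.from_json_file(path)
#     assert first.to_dict() == second.to_dict()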
| 350
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''feature_extraction_poolformer'''] = ['''PoolFormerFeatureExtractor''']
_import_structure['''image_processing_poolformer'''] = ['''PoolFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_poolformer'''] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
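# The pattern above defers heavy imports (torch, vision backends) until an
# attribute is first accessed, so importing the package stays cheap. A
# simplified stand-alone analogue using PEP 562 module-level __getattr__; the
# submodule/attribute names below are made up for illustration:
# import importlib
# _lazy_attrs = {"PoolFormerModel": ".modeling_poolformer"}
# def __getattr__(name):
#     if name in _lazy_attrs:
#         module = importlib.import_module(_lazy_attrs[name], __name__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")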
| 306
| 0
|
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D ( nn.Module ):
'''simple docstring'''
out_channels : int
dtype : jnp.dtype = jnp.float32
def setup( self ):
"""simple docstring"""
self.conv = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , hidden_states ):
"""simple docstring"""
batch , height , width , channels = hidden_states.shape
# nearest-neighbour 2x spatial upsampling (NHWC layout), then a 3x3 convolution
hidden_states = jax.image.resize(
hidden_states , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , )
hidden_states = self.conv(hidden_states )
return hidden_states
class FlaxDownsample2D ( nn.Module ):
'''simple docstring'''
out_channels : int
dtype : jnp.dtype = jnp.float32
def setup( self ):
"""simple docstring"""
self.conv = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , hidden_states ):
"""simple docstring"""
hidden_states = self.conv(hidden_states )
return hidden_states
class FlaxResnetBlock2D ( nn.Module ):
'''simple docstring'''
in_channels : int
out_channels : int = None
dropout_prob : float = 0.0
use_nin_shortcut : bool = None
dtype : jnp.dtype = jnp.float32
def setup( self ):
"""simple docstring"""
out_channels = self.in_channels if self.out_channels is None else self.out_channels
self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
self.conv1 = nn.Conv(
out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
self.dropout = nn.Dropout(self.dropout_prob )
self.conv2 = nn.Conv(
out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
self.conv_shortcut = None
if use_nin_shortcut:
self.conv_shortcut = nn.Conv(
out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , )
def __call__( self , hidden_states , temb , deterministic=True ):
"""simple docstring"""
residual = hidden_states
hidden_states = self.norm1(hidden_states )
hidden_states = nn.swish(hidden_states )
hidden_states = self.conv1(hidden_states )
# project the timestep embedding and broadcast it over the spatial dimensions
temb = self.time_emb_proj(nn.swish(temb ) )
temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
hidden_states = hidden_states + temb
hidden_states = self.norm2(hidden_states )
hidden_states = nn.swish(hidden_states )
hidden_states = self.dropout(hidden_states , deterministic )
hidden_states = self.conv2(hidden_states )
if self.conv_shortcut is not None:
residual = self.conv_shortcut(residual )
return hidden_states + residual
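# Sketch: initializing and applying the upsampling block above. Inputs are in
# NHWC layout, which is what jax.image.resize in __call__ assumes; the shapes
# below are illustrative.
rng = jax.random.PRNGKey(0 )
block = FlaxUpsample2D(out_channels=8 )
x = jnp.ones((1, 16, 16, 8) )
params = block.init(rng , x )
y = block.apply(params , x )
assert y.shape == (1, 32, 32, 8)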
| 351
|
from __future__ import annotations
def prime_factors ( n : int ) -> list[int]:
"""Return the prime factorization of n in ascending order, with multiplicity."""
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i )
if n > 1:
factors.append(n )
return factors
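# Example: 360 = 2**3 * 3**2 * 5, so the list repeats each prime by its
# multiplicity.
assert prime_factors(360 ) == [2, 2, 2, 3, 3, 5]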
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 0
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : Tuple = logging.get_logger(__name__)
def normalize_box ( box , width , height ):
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def apply_tesseract ( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] = None ):
tesseract_config = tesseract_config if tesseract_config is not None else """"""
# apply OCR
pil_image = to_pil_image(image )
image_width , image_height = pil_image.size
data = pytesseract.image_to_data(pil_image , lang=lang , output_type="""dict""" , config=tesseract_config )
words , left , top , width , height = data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
actual_boxes = []
for x, y, w, h in zip(left , top , width , height ):
actual_box = [x, y, x + w, y + h]
actual_boxes.append(actual_box )
# finally, normalize the bounding boxes
normalized_boxes = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(box , image_width , image_height ) )
assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
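# Worked example of the 0-1000 normalization above: a 200x100 px box whose
# top-left corner sits at (40, 30) on a 400x200 px page.
assert normalize_box([40, 30, 240, 130] , 400 , 200 ) == [100, 150, 600, 650]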
class lowerCAmelCase ( BaseImageProcessor ):
'''simple docstring'''
model_input_names = ['''pixel_values''']
def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , apply_ocr : bool = True , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = "" , **kwargs , ) -> None:
"""simple docstring"""
super().__init__(**kwargs )
size = size if size is not None else {"""height""": 224, """width""": 224}
size = get_size_dict(size )
self.do_resize = do_resize
self.size = size
self.resample = resample
self.apply_ocr = apply_ocr
self.ocr_lang = ocr_lang
self.tesseract_config = tesseract_config
def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
"""simple docstring"""
size = get_size_dict(size )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
output_size = (size["""height"""], size["""width"""])
return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , apply_ocr : bool = None , ocr_lang : Optional[str] = None , tesseract_config : Optional[str] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
"""simple docstring"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size )
resample = resample if resample is not None else self.resample
apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
words_batch = []
boxes_batch = []
for image in images:
words , boxes = apply_tesseract(image , ocr_lang , tesseract_config )
words_batch.append(words )
boxes_batch.append(boxes )
if do_resize:
images = [self.resize(image=image , size=size , resample=resample ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
images = [flip_channel_order(image ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = BatchFeature(data={"""pixel_values""": images} , tensor_type=return_tensors )
if apply_ocr:
data["""words"""] = words_batch
data["""boxes"""] = boxes_batch
return data
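# Sketch: running the processor above on a single page image. The file name is
# illustrative, and apply_ocr=True requires the pytesseract backend plus a
# system Tesseract install.
# from PIL import Image
# processor = lowerCAmelCase(apply_ocr=True)  # the image processor defined above
# page = Image.open("invoice.png").convert("RGB")
# encoding = processor(page, return_tensors="np")
# print(encoding["pixel_values"].shape, len(encoding["words"][0]))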
| 352
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
input_ids = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , ) # J'aime le camembert !
output = model(input_ids )["""last_hidden_state"""]
expected_shape = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , expected_shape )
# compare the actual values for a slice.
expected_slice = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 306
| 0
|
from ...processing_utils import ProcessorMixin
class lowerCAmelCase ( ProcessorMixin ):
'''simple docstring'''
attributes = ['''image_processor''', '''feature_extractor''']
image_processor_class = '''TvltImageProcessor'''
feature_extractor_class = '''TvltFeatureExtractor'''
def __init__( self , image_processor , feature_extractor ):
"""simple docstring"""
super().__init__(image_processor=image_processor , feature_extractor=feature_extractor )
self.image_processor = image_processor
self.feature_extractor = feature_extractor
def __call__( self , images=None , audio=None , images_mixed=None , sampling_rate=None , mask_audio=False , mask_pixel=False , *args , **kwargs , ):
"""simple docstring"""
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
images_mixed_dict = None
if images is not None:
images_dict = self.image_processor(images , mask_pixel=mask_pixel , *args , **kwargs )
if images_mixed is not None:
images_mixed_dict = self.image_processor(images_mixed , is_mixed=True , *args , **kwargs )
if audio is not None:
audio_dict = self.feature_extractor(
audio , *args , sampling_rate=sampling_rate , mask_audio=mask_audio , **kwargs )
output_dict = {}
if audio is not None:
output_dict.update(audio_dict )
if images is not None:
output_dict.update(images_dict )
if images_mixed_dict is not None:
output_dict.update(images_mixed_dict )
return output_dict
@property
def model_input_names( self ):
"""simple docstring"""
image_processor_input_names = self.image_processor.model_input_names
feature_extractor_input_names = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
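# Sketch: feeding dummy video frames plus a waveform through the processor
# above. All shapes and the sampling rate are illustrative assumptions; real
# inputs come from decoded media, and `processor` is an instance built from
# the matching image processor and feature extractor.
# import numpy as np
# frames = [np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
# waveform = np.random.randn(44100).astype(np.float32)
# batch = processor(images=[frames], audio=waveform, sampling_rate=44100, return_tensors="np")
# print(sorted(batch.keys()))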
| 353
|
def can_abbreviate ( a : str , b : str ) -> bool:
"""Check whether `a` can be turned into `b` by upper-casing some of its
lowercase letters and deleting the remaining lowercase letters."""
n = len(a )
m = len(b )
dp = [[False for _ in range(m + 1 )] for _ in range(n + 1 )]
dp[0][0] = True
for i in range(n ):
for j in range(m + 1 ):
if dp[i][j]:
if j < m and a[i].upper() == b[j]:
dp[i + 1][j + 1] = True
if a[i].islower():
dp[i + 1][j] = True
return dp[n][m]
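# "daBcd" -> "ABC": capitalize the 'a' and 'c' and delete both 'd's; "dBcd"
# fails because there is no 'a' to turn into the required 'A'.
assert can_abbreviate("""daBcd""" , """ABC""" )
assert not can_abbreviate("""dBcd""" , """ABC""" )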
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[str] = {'''tokenizer_file''': '''tokenizer.json'''}
lowerCamelCase : List[Any] = {
'''tokenizer_file''': {
'''bigscience/tokenizer''': '''https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json''',
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json''',
},
}
class lowerCAmelCase ( PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
model_input_names = ['''input_ids''', '''attention_mask''']
slow_tokenizer_class = None
def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<unk>" , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , add_prefix_space=False , clean_up_tokenization_spaces=False , **kwargs , ):
"""simple docstring"""
super().__init__(
vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , add_prefix_space=add_prefix_space , clean_up_tokenization_spaces=clean_up_tokenization_spaces , **kwargs , )
pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , add_prefix_space ) != add_prefix_space:
pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("""type""" ) )
pre_tok_state["""add_prefix_space"""] = add_prefix_space
self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
self.add_prefix_space = add_prefix_space
def _batch_encode_plus( self , *args , **kwargs ) -> BatchEncoding:
"""simple docstring"""
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
""" pretokenized inputs.""" )
return super()._batch_encode_plus(*args , **kwargs )
def _encode_plus( self , *args , **kwargs ) -> BatchEncoding:
"""simple docstring"""
is_split_into_words = kwargs.get("""is_split_into_words""" , False )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
""" pretokenized inputs.""" )
return super()._encode_plus(*args , **kwargs )
def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
files = self._tokenizer.model.save(save_directory , name=filename_prefix )
return tuple(files )
def _build_conversation_input_ids( self , conversation : "Conversation" ) -> List[int]:
"""simple docstring"""
input_ids = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
if len(input_ids ) > self.model_max_length:
input_ids = input_ids[-self.model_max_length :]
return input_ids
| 354
|
from scipy.stats import spearmanr
import datasets
lowerCamelCase : List[str] = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
lowerCamelCase : List[str] = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
lowerCamelCase : Union[str, Any] = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , )
def _compute( self , predictions , references , return_pvalue=False ):
"""simple docstring"""
results = spearmanr(references , predictions )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 306
| 0
|
import json
import sys
def format_json_to_md ( input_json_file , output_md_file ):
with open(input_json_file , encoding="""utf-8""" ) as f:
results = json.load(f )
output_md = ["""<details>""", """<summary>Show updated benchmarks!</summary>""", """ """]
for benchmark_name in sorted(results ):
benchmark_res = results[benchmark_name]
benchmark_file_name = benchmark_name.split("""/""" )[-1]
output_md.append(F"### Benchmark: {benchmark_file_name}" )
title = """| metric |"""
lines = """|--------|"""
value = """| new / old (diff) |"""
for metric_name in sorted(benchmark_res ):
metric_vals = benchmark_res[metric_name]
new_val = metric_vals["""new"""]
old_val = metric_vals.get("""old""" , None )
dif_val = metric_vals.get("""diff""" , None )
val_str = F" {new_val:f}" if isinstance(new_val , (int, float) ) else """None"""
if old_val is not None:
val_str += F" / {old_val:f}" if isinstance(old_val , (int, float) ) else "None"
if dif_val is not None:
val_str += F" ({dif_val:f})" if isinstance(dif_val , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("""</details>""" )
with open(output_md_file , """w""" , encoding="""utf-8""" ) as f:
f.writelines("""\n""".join(output_md ) )
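# Tiny end-to-end example for the script above, with synthetic numbers; the
# benchmark name and metric are made up for illustration.
# import os, tempfile
# results = {"benchmarks/generate": {"tokens_per_s": {"new": 105.0, "old": 98.0, "diff": 7.0}}}
# with tempfile.TemporaryDirectory() as tmp:
#     src, dst = os.path.join(tmp, "r.json"), os.path.join(tmp, "r.md")
#     with open(src, "w", encoding="utf-8") as f:
#         json.dump(results, f)
#     format_json_to_md(src, dst)
#     print(open(dst, encoding="utf-8").read())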
if __name__ == "__main__":
lowerCamelCase : str = sys.argv[1]
lowerCamelCase : Union[str, Any] = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 355
|
from __future__ import annotations
def kmp ( pattern : str , text : str ) -> bool:
# 1) Construct the failure array for the pattern
failure = get_failure_array(pattern )
# 2) Step through text searching for pattern
i , j = 0, 0 # index into text, pattern
while i < len(text ):
if pattern[j] == text[i]:
if j == (len(pattern ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
j = failure[j - 1]
continue
i += 1
return False
def get_failure_array ( pattern : str ) -> list[int]:
failure = [0]
i = 0
j = 1
while j < len(pattern ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
i = failure[i - 1]
continue
j += 1
failure.append(i )
return failure
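# Worked example: the failure array of "ABABX" is [0, 0, 1, 2, 0]. After
# matching the prefix "ABAB", a mismatch falls back to the length-2 border
# "AB" instead of restarting the scan, which keeps the search linear.
assert get_failure_array("""ABABX""" ) == [0, 0, 1, 2, 0]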
if __name__ == "__main__":
# Test 1)
lowerCamelCase : Dict = '''abc1abc12'''
lowerCamelCase : Union[str, Any] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
lowerCamelCase : Any = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
lowerCamelCase : List[Any] = '''ABABX'''
lowerCamelCase : List[Any] = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
lowerCamelCase : int = '''AAAB'''
lowerCamelCase : Optional[int] = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
lowerCamelCase : Optional[Any] = '''abcdabcy'''
lowerCamelCase : Any = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
lowerCamelCase : Dict = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 306
| 0
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = BlenderbotSmallTokenizer
test_rust_tokenizer = False
def setUp( self ):
"""simple docstring"""
super().setUp()
vocab = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
self.special_tokens_map = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(vocab_tokens ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(merges ) )
def get_tokenizer( self , **kwargs ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def get_input_output_texts( self , tokenizer ):
"""simple docstring"""
input_text = """adapt act apte"""
output_text = """adapt act apte"""
return input_text, output_text
def test_full_tokenizer( self ):
"""simple docstring"""
tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
text = """adapt act apte"""
bpe_tokens = ["""adapt""", """act""", """ap@@""", """te"""]
tokens = tokenizer.tokenize(text )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
input_bpe_tokens = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def test_special_tokens_small_tok( self ):
"""simple docstring"""
tok = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
assert tok("""sam""" ).input_ids == [1384]
src_text = """I am a small frog."""
encoded = tok([src_text] , padding=False , truncation=True )["""input_ids"""]
decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def test_empty_word_small_tok( self ):
"""simple docstring"""
tok = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
src_text = """I am a small frog ."""
src_text_dot = """."""
encoded = tok(src_text )["""input_ids"""]
encoded_dot = tok(src_text_dot )["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
| 356
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCAmelCase ( BaseImageProcessor ):
'''simple docstring'''
model_input_names = ['''pixel_values''']
def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BICUBIC , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = True , **kwargs , ) -> None:
"""simple docstring"""
super().__init__(**kwargs )
size = size if size is not None else {"""shortest_edge""": 224}
size = get_size_dict(size , default_to_square=False )
crop_size = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
crop_size = get_size_dict(crop_size , default_to_square=True , param_name="""crop_size""" )
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_center_crop = do_center_crop
self.crop_size = crop_size
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
self.do_convert_rgb = do_convert_rgb
def resize( self , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
"""simple docstring"""
size = get_size_dict(size , default_to_square=False )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
output_size = get_resize_output_image_size(image , size=size["""shortest_edge"""] , default_to_square=False )
return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
def center_crop( self , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
"""simple docstring"""
size = get_size_dict(size )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(image , size=(size["""height"""], size["""width"""]) , data_format=data_format , **kwargs )
def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
"""simple docstring"""
return rescale(image , scale=scale , data_format=data_format , **kwargs )
def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
"""simple docstring"""
return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_center_crop : bool = None , crop_size : int = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , do_convert_rgb : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : Optional[ChannelDimension] = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
"""simple docstring"""
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size , param_name="""size""" , default_to_square=False )
resample = resample if resample is not None else self.resample
do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
crop_size = crop_size if crop_size is not None else self.crop_size
crop_size = get_size_dict(crop_size , param_name="""crop_size""" , default_to_square=True )
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
images = make_list_of_images(images )
if not valid_images(images ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
images = [convert_to_rgb(image ) for image in images]
# All transformations expect numpy arrays.
images = [to_numpy_array(image ) for image in images]
if do_resize:
images = [self.resize(image=image , size=size , resample=resample ) for image in images]
if do_center_crop:
images = [self.center_crop(image=image , size=crop_size ) for image in images]
if do_rescale:
images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
if do_normalize:
images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
images = [to_channel_dimension_format(image , data_format ) for image in images]
data = {"""pixel_values""": images}
return BatchFeature(data=data , tensor_type=return_tensors )
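# Sketch: preprocessing one PIL image with the processor above (class kept
# under its local name here); dummy pixels stand in for a real photo, and the
# expected output shape assumes the default 224x224 crop.
# import numpy as np, PIL.Image
# image = PIL.Image.fromarray(np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8))
# processor = lowerCAmelCase()
# batch = processor(images=image, return_tensors="np")
# print(batch["pixel_values"].shape)  # (1, 3, 224, 224)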
| 306
| 0
|
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase : Any = 0
@slow
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__lowercase : List[Any] = AutoTokenizer.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__a ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__lowercase : Dict = AutoTokenizer.from_pretrained(__a )
self.assertIsNotNone(__a )
self.assertIsInstance(__a , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__a ) , 0 )
def test_tokenizer_from_pretrained_identifier( self ):
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def test_tokenizer_from_model_type( self ):
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
self.assertIsInstance(tokenizer , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def test_tokenizer_from_tokenizer_class( self ):
"""simple docstring"""
config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER )
self.assertIsInstance(config , RobertaConfig )
# Check that tokenizer_type ≠ model_type
tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER , config=config )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__a , """vocab.txt""" ) )
__lowercase : Optional[int] = AutoTokenizer.from_pretrained(__a , tokenizer_type="""bert""" , use_fast=__a )
self.assertIsInstance(__a , __a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__a , """merges.txt""" ) )
__lowercase : Tuple = AutoTokenizer.from_pretrained(__a , tokenizer_type="""gpt2""" , use_fast=__a )
self.assertIsInstance(__a , __a )
@require_tokenizers
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.txt""" , os.path.join(__a , """vocab.txt""" ) )
__lowercase : Any = AutoTokenizer.from_pretrained(__a , tokenizer_type="""bert""" )
self.assertIsInstance(__a , __a )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy("""./tests/fixtures/vocab.json""" , os.path.join(__a , """vocab.json""" ) )
shutil.copy("""./tests/fixtures/merges.txt""" , os.path.join(__a , """merges.txt""" ) )
__lowercase : Tuple = AutoTokenizer.from_pretrained(__a , tokenizer_type="""gpt2""" )
self.assertIsInstance(__a , __a )
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
with pytest.raises(__a ):
AutoTokenizer.from_pretrained("""./""" , tokenizer_type="""xxx""" )
@require_tokenizers
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__lowercase : Any = tokenizer_class.from_pretrained("""wietsedv/bert-base-dutch-cased""" )
self.assertIsInstance(__a , (BertTokenizer, BertTokenizerFast) )
if isinstance(__a , __a ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __a )
else:
self.assertEqual(tokenizer.do_lower_case , __a )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__a , """julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier""" , ):
__lowercase : Optional[Any] = tokenizer_class.from_pretrained("""julien-c/herlolip-not-exists""" )
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = TOKENIZER_MAPPING.values()
__lowercase : List[str] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__a )
@require_tokenizers
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=__a ) , __a )
self.assertIsInstance(AutoTokenizer.from_pretrained("""bert-base-cased""" ) , __a )
@require_tokenizers
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : str = AutoTokenizer.from_pretrained("""distilbert-base-uncased""" , do_lower_case=__a )
__lowercase : Optional[int] = """Hello, world. How are you?"""
__lowercase : List[Any] = tokenizer.tokenize(__a )
self.assertEqual("""[UNK]""" , tokens[0] )
__lowercase : Tuple = AutoTokenizer.from_pretrained("""microsoft/mpnet-base""" , do_lower_case=__a )
__lowercase : str = tokenizer.tokenize(__a )
self.assertEqual("""[UNK]""" , tokens[0] )
@require_tokenizers
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = AutoTokenizer.from_pretrained("""robot-test/dummy-tokenizer-fast-with-model-config""" )
self.assertEqual(type(__a ) , __a )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30000 )
self.assertEqual(tokenizer.unk_token , """[UNK]""" )
self.assertEqual(tokenizer.padding_side , """right""" )
self.assertEqual(tokenizer.truncation_side , """right""" )
def test_auto_tokenizer_from_local_folder( self ):
"""simple docstring"""
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
self.assertIsInstance(tokenizer , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir )
self.assertIsInstance(tokenizer2 , tokenizer.__class__ )
self.assertEqual(tokenizer2.vocab_size , 12 )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[Any] = AutoTokenizer.from_pretrained("""ctrl""" )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(__a , __a )
def test_get_tokenizer_config( self ):
"""simple docstring"""
config = get_tokenizer_config("""bert-base-cased""" )
_ = config.pop("""_commit_hash""" , None )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(config , {"""do_lower_case""": False} )
# This model does not have a tokenizer_config so we get back an empty dict.
config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER )
self.assertDictEqual(config , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(tmp_dir )
config = get_tokenizer_config(tmp_dir )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config["""tokenizer_class"""] , """BertTokenizer""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __a )
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
__lowercase : List[Any] = CustomTokenizer.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
try:
AutoConfig.register("""custom""" , __a )
# Can register in two steps
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(__a , fast_tokenizer_class=__a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
__a , slow_tokenizer_class=__a , fast_tokenizer_class=__a )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoTokenizer.register(__a , fast_tokenizer_class=__a )
# We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
# and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__lowercase : int = BertTokenizerFast.from_pretrained(__a )
bert_tokenizer.save_pretrained(__a )
__lowercase : str = CustomTokenizerFast.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : List[str] = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , __a )
__lowercase : int = AutoTokenizer.from_pretrained(__a , use_fast=__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
with self.assertRaises(__a ):
__lowercase : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
__lowercase : Tuple = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
__lowercase : List[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a , trust_remote_code=__a )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowercase : List[str] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a , use_fast=__a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a )
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a , trust_remote_code=__a , use_fast=__a )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , """NewTokenizer""" )
@require_tokenizers
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Tuple = False
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : str = NewTokenizer
_A : Union[str, Any] = False
try:
AutoConfig.register("""custom""" , __a )
AutoTokenizer.register(__a , slow_tokenizer_class=__a )
AutoTokenizer.register(__a , fast_tokenizer_class=__a )
# If remote code is not set, the default is to use local
__lowercase : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowercase : Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/test_dynamic_tokenizer""" , use_fast=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
__lowercase : List[str] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertFalse(tokenizer.special_attribute_present )
__lowercase : Optional[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a , use_fast=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
__lowercase : Any = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
self.assertTrue(tokenizer.special_attribute_present )
__lowercase : Dict = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer""" , trust_remote_code=__a , use_fast=__a )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : str = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__a )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" )
# Test we can also load the slow version
__lowercase : List[Any] = AutoTokenizer.from_pretrained(
"""hf-internal-testing/test_dynamic_tokenizer_legacy""" , trust_remote_code=__a , use_fast=__a )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
else:
self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" )
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
with self.assertRaisesRegex(
__a , """bert-base is not a local folder and is not a valid model identifier""" ):
__lowercase : List[Any] = AutoTokenizer.from_pretrained("""bert-base""" )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
with self.assertRaisesRegex(
__a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
__lowercase : Union[str, Any] = AutoTokenizer.from_pretrained(__a , revision="""aaaaaa""" )
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
with RequestCounter() as counter:
__lowercase : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 357
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    # replace only the last `occurrence` matches of `old`, working from the right
    split = s.rsplit(old, occurrence)
    return new.join(split)
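# Illustrative check (hypothetical key): only the last occurrence is replaced, so
# rreplace("blocks.0.attn.w", ".w", ".weight", 1) == "blocks.0.attn.weight".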
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
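# Skipping the double-copied embedding keys keeps the parameter-sum check in
# convert_dalle_checkpoint comparable between the original and converted weights.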
def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F"{group_key}.", F"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade
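# Minimal sketch on hypothetical keys (values assumed to be tensors):
#   {"group_1.conv.w": w, "res_path.0.b": b}
# is upgraded to
#   {"group_1.group.conv.weight": w.float(), "res_path.path.0.bias": b.float()}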
@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
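# Example invocation (placeholder paths; the checkpoint may also be a URL that
# torch.hub can download):
#   python <this script> --checkpoint_path ./dalle_encoder.pkl --pytorch_dump_folder_path ./flava-codebook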
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, F"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
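# Sketch of the expected call (hypothetical layer; shapes must already agree):
#   layer = nn.Linear(4, 4)
#   set_param(layer, torch.randn(4, 4), bias=torch.zeros(4))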
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
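# Reformer's LSH attention shares one projection for queries and keys
# ("query_key", hence the 3 weight arrays above), while local attention keeps
# separate query/key/value projections (4 arrays). set_block_weights_in_torch
# below dispatches on that length difference.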
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
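# For reference, the trax checkpoint is a nested sequence whose indices are used
# above: [1] word embeddings, [3] (axial) position embeddings, [5] the per-layer
# weights (4 entries per torch layer), [7] the final layer norm, [9] the output
# projection.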
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(F"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
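    # Walk each missing key attribute by attribute, descending through the new
    # (`model`) and old (`old_model`) modules in parallel until a weight or bias
    # can be copied across.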
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        old_attribute = ""  # sentinel: empty means "stay on the current old module"
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
            if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(F"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(F"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(F"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(F"{key} was not correctly initialized!")

    print(F"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Prepare a list of PIL images from (3, 30, 400) uint8 arrays, moved to channels-last."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.get_tokenizer()
__lowercase : List[Any] = self.get_rust_tokenizer()
__lowercase : str = self.get_image_processor()
__lowercase : Tuple = CLIPProcessor(tokenizer=__a , image_processor=__a )
processor_slow.save_pretrained(self.tmpdirname )
__lowercase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
__lowercase : Optional[int] = CLIPProcessor(tokenizer=__a , image_processor=__a )
processor_fast.save_pretrained(self.tmpdirname )
__lowercase : str = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __a )
self.assertIsInstance(processor_fast.tokenizer , __a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __a )
self.assertIsInstance(processor_fast.image_processor , __a )
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : Dict = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase : str = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__lowercase : Optional[Any] = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = self.get_image_processor()
__lowercase : Optional[int] = self.get_tokenizer()
__lowercase : List[Any] = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowercase : Optional[int] = self.prepare_image_inputs()
__lowercase : Union[str, Any] = image_processor(__a , return_tensors="""np""" )
__lowercase : Union[str, Any] = processor(images=__a , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase ( self : str ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.get_image_processor()
__lowercase : Optional[Any] = self.get_tokenizer()
__lowercase : List[str] = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowercase : int = """lower newer"""
__lowercase : Optional[int] = processor(text=__a )
__lowercase : Dict = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = self.get_image_processor()
__lowercase : str = self.get_tokenizer()
__lowercase : Union[str, Any] = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowercase : Any = """lower newer"""
__lowercase : str = self.prepare_image_inputs()
__lowercase : List[Any] = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : int = self.get_image_processor()
__lowercase : Any = self.get_tokenizer()
__lowercase : Union[str, Any] = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowercase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase : Union[str, Any] = processor.batch_decode(__a )
__lowercase : int = tokenizer.batch_decode(__a )
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Optional[Any] = self.get_image_processor()
__lowercase : Any = self.get_tokenizer()
__lowercase : int = CLIPProcessor(tokenizer=__a , image_processor=__a )
__lowercase : Optional[int] = """lower newer"""
__lowercase : List[Any] = self.prepare_image_inputs()
__lowercase : str = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def solution(pence: int = 200) -> int:
    """Count the number of ways `pence` pence can be made from standard UK coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
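# Worked example: solution(5) == 4, counting {5}, {2+2+1}, {2+1+1+1} and {1+1+1+1+1}.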
if __name__ == "__main__":
    assert solution(200) == 73682
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<pad>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        # id 0 is the sentencepiece <unk>: "9" and "é" are out of vocabulary for the
        # 1000-piece test model, so they round-trip back as "<unk>" below.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@require_torch
@slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = {"""input_ids""": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" , revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" , )
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()

        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
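        # Both spellings address the same final stage: once relative from the end,
        # once by absolute index.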
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def lowerCAmelCase ( self : Optional[int] ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""mecab""" )
self.assertIsNotNone(__a )
__lowercase : Union[str, Any] = """こんにちは、世界。\nこんばんは、世界。"""
__lowercase : Dict = tokenizer.tokenize(__a )
self.assertListEqual(__a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowercase : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(__a , """wb""" ) as handle:
pickle.dump(__a , __a )
with open(__a , """rb""" ) as handle:
__lowercase : int = pickle.load(__a )
__lowercase : List[Any] = tokenizer_new.tokenize(__a )
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase : Any = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
try:
__lowercase : List[Any] = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
try:
__lowercase : Any = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : int = MecabTokenizer(do_lower_case=__a , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
try:
__lowercase : Union[str, Any] = MecabTokenizer(
do_lower_case=__a , normalize_text=__a , mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase : int = MecabTokenizer(normalize_text=__a , mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] , )
@require_sudachi
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(__a )
__lowercase : Union[str, Any] = """こんにちは、世界。\nこんばんは、世界。"""
__lowercase : Optional[Any] = tokenizer.tokenize(__a )
self.assertListEqual(__a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowercase : Optional[Any] = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(__a , """wb""" ) as handle:
pickle.dump(__a , __a )
with open(__a , """rb""" ) as handle:
__lowercase : Tuple = pickle.load(__a )
__lowercase : Dict = tokenizer_new.tokenize(__a )
self.assertListEqual(__a , __a )
@require_sudachi
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
__lowercase : int = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人""", """参政権"""] )
@require_sudachi
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="""core""" , sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) , ["""外国人参政権"""] )
@require_sudachi
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = SudachiTokenizer(do_lower_case=__a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] , )
@require_sudachi
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[str] = SudachiTokenizer(normalize_text=__a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , [""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] , )
@require_sudachi
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[int] = SudachiTokenizer(trim_whitespace=__a , sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] , )
@require_jumanpp
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Any = self.tokenizer_class(self.vocab_file , word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(__a )
__lowercase : Any = """こんにちは、世界。\nこんばんは、世界。"""
__lowercase : List[Any] = tokenizer.tokenize(__a )
self.assertListEqual(__a , ["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
__lowercase : Dict = os.path.join(self.tmpdirname , """tokenizer.bin""" )
with open(__a , """wb""" ) as handle:
pickle.dump(__a , __a )
with open(__a , """rb""" ) as handle:
__lowercase : List[Any] = pickle.load(__a )
__lowercase : Any = tokenizer_new.tokenize(__a )
self.assertListEqual(__a , __a )
@require_jumanpp
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : Optional[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : List[Any] = JumanppTokenizer(do_lower_case=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase : int = JumanppTokenizer(normalize_text=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] , )
@require_jumanpp
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = JumanppTokenizer(trim_whitespace=__a )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) , ["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] , )
@require_jumanpp
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : int = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) , ["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] , )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) , ["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) , ["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_a = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def lowerCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass # TODO add if relevant
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"]
        )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12]
        )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
__lowercase : Dict = {}
for i, token in enumerate(__a ):
__lowercase : List[str] = i
__lowercase : Tuple = CharacterTokenizer(vocab=__a , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) , ["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) , ["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
__lowercase : Optional[int] = tokenizer.encode("""ありがとう。""" , add_special_tokens=False )
__lowercase : Optional[Any] = tokenizer.encode("""どういたしまして。""" , add_special_tokens=False )
__lowercase : Any = tokenizer.build_inputs_with_special_tokens(text )
__lowercase : Dict = tokenizer.build_inputs_with_special_tokens(text , text_a )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = """cl-tohoku/bert-base-japanese"""
__lowercase : Tuple = AutoTokenizer.from_pretrained(__a )
self.assertIsInstance(__a , __a )
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
__lowercase : Union[str, Any] = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(__a )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
__lowercase : Tuple = """bert-base-cased"""
with self.assertLogs("""transformers""" , level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(__a )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
| 361
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : str = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCamelCase : Optional[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split(""".""" ):
hf_pointer = getattr(hf_pointer , attribute )
if weight_type is not None:
hf_shape = getattr(hf_pointer , weight_type ).shape
else:
hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
hf_pointer.weight.data = value
elif weight_type == "weight_g":
hf_pointer.weight_g.data = value
elif weight_type == "weight_v":
hf_pointer.weight_v.data = value
elif weight_type == "bias":
hf_pointer.bias.data = value
elif weight_type == "running_mean":
hf_pointer.running_mean.data = value
elif weight_type == "running_var":
hf_pointer.running_var.data = value
elif weight_type == "num_batches_tracked":
hf_pointer.num_batches_tracked.data = value
elif weight_type == "inv_freq":
hf_pointer.inv_freq.data = value
else:
hf_pointer.data = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights ( fairseq_model , hf_model , is_headless ):
unused_weights = []
fairseq_dict = fairseq_model.state_dict()
feature_extractor = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
is_used = False
if "conv_layers" in name:
load_conv_layer(
name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
is_used = True
else:
for key, mapped_key in MAPPING.items():
mapped_key = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
is_used = True
if "*" in mapped_key:
layer_index = name.split(key )[0].split(""".""" )[-2]
mapped_key = mapped_key.replace("""*""" , layer_index )
if "pos_bias_u" in name:
weight_type = None
elif "pos_bias_v" in name:
weight_type = None
elif "weight_g" in name:
weight_type = """weight_g"""
elif "weight_v" in name:
weight_type = """weight_v"""
elif "bias" in name:
weight_type = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
weight_type = """weight"""
elif "running_mean" in name:
weight_type = """running_mean"""
elif "inv_freq" in name:
weight_type = """inv_freq"""
elif "running_var" in name:
weight_type = """running_var"""
elif "num_batches_tracked" in name:
weight_type = """num_batches_tracked"""
else:
weight_type = None
set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(name )
logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
name = full_name.split("""conv_layers.""" )[-1]
items = name.split(""".""" )
layer_id = int(items[0] )
type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(full_name )
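# Fairseq feature-extractor weights arrive with names like "conv_layers.0.2.weight";
# the first integer selects the conv layer and the second the sub-module (0 = conv,
# 2 = layer norm), which is exactly what the parsing at the top of load_conv_layer
# does. A quick worked example (the name itself is made up):
_example = "conv_layers.0.2.weight".split("conv_layers.")[-1]  # -> "0.2.weight"
_layer_id, _type_id = int(_example.split(".")[0]), int(_example.split(".")[1])  # -> 0, 2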
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
if config_path is not None:
config = WavaVecaConformerConfig.from_pretrained(config_path , hidden_act="""swish""" )
else:
config = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
config.position_embeddings_type = """rotary"""
if is_finetuned:
if dict_path:
target_dict = Dictionary.load(dict_path )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
config.bos_token_id = target_dict.pad_index
config.pad_token_id = target_dict.bos_index
config.eos_token_id = target_dict.eos_index
config.vocab_size = len(target_dict.symbols )
vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
if not os.path.isdir(pytorch_dump_folder_path ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
return
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
vocab_dict = target_dict.indices
# fairseq has the <pad> and <s> switched
vocab_dict["<pad>"] = 0
vocab_dict["<s>"] = 1
with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(vocab_dict , vocab_handle )
tokenizer = WavaVecaCTCTokenizer(
vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
return_attention_mask = True if config.feat_extract_norm == """layer""" else False
feature_extractor = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
processor.save_pretrained(pytorch_dump_folder_path )
hf_wavavec = WavaVecaConformerForCTC(config )
else:
hf_wavavec = WavaVecaConformerForPreTraining(config )
if is_finetuned:
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
task_arg = argparse.Namespace(task="""audio_pretraining""" )
task = fairseq.tasks.setup_task(task_arg )
model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
model = model[0].eval()
recursively_load_weights(model , hf_wavavec , not is_finetuned )
hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCamelCase : Any = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
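# Example invocation (all paths are placeholders; the script file name is whatever
# this module is saved as):
# python convert_wav2vec2_conformer.py \
#     --checkpoint_path /path/to/fairseq_ckpt.pt \
#     --pytorch_dump_folder_path ./converted \
#     --config_path ./config.json --dict_path ./dict.ltr.txt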
| 306
| 0
|
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 362
|
def snake_case_ ( string_a : str , string_b : str ):
if len(string_a ) != len(string_b ):
raise ValueError("""String lengths must match!""" )
count = 0
for char_a, char_b in zip(string_a , string_b ):
if char_a != char_b:
count += 1
return count
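# Quick check of the function above: "karolin" vs "kathrin" differ at three
# positions (r/t, o/h, l/r), so the expected Hamming distance is 3.
# snake_case_("karolin", "kathrin")  # -> 3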
if __name__ == "__main__":
import doctest
doctest.testmod()
| 306
| 0
|
def is_pentagonal ( n : int ):
root = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def solution ( limit : int = 5000 ):
pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
for i, pentagonal_i in enumerate(pentagonal_nums ):
for j in range(i , len(pentagonal_nums ) ):
pentagonal_j = pentagonal_nums[j]
a = pentagonal_i + pentagonal_j
b = pentagonal_j - pentagonal_i
if is_pentagonal(a ) and is_pentagonal(b ):
return b
return -1
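# Why is_pentagonal works (a short derivation): pentagonal numbers satisfy
# P(n) = n(3n - 1) / 2, so 24 * P(n) + 1 = 36n**2 - 12n + 1 = (6n - 1)**2.
# Hence sqrt(1 + 24x) is the integer 6n - 1 exactly when x is pentagonal, and
# n = (1 + sqrt(1 + 24x)) / 6 must come out whole, which is the fractional-part
# test above.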
if __name__ == "__main__":
print(f'''{solution() = }''')
| 363
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple ( x ):
if isinstance(x , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class lowerCAmelCase :
'''simple docstring'''
def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = np.abs((a - b) ).max()
self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname )
__lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname )
__lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = after_output[0]
__lowercase : int = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(__a , 1E-3 )
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
__lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Union[str, Any] = model(
input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
__lowercase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
__lowercase : List[str] = to_atuple(vision_model.config.patch_size )
__lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase : Dict = output.text_model_output.attentions
self.assertEqual(len(__a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pt_model.to(__a )
pt_model.eval()
# prepare inputs
__lowercase : Union[str, Any] = inputs_dict
__lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
__lowercase : Tuple = fx_model(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(tmpdirname )
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname , from_pt=True )
__lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(tmpdirname )
__lowercase : str = VisionTextDualEncoderModel.from_pretrained(tmpdirname , from_flax=True )
pt_model_loaded.to(__a )
pt_model_loaded.eval()
with torch.no_grad():
__lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = VisionTextDualEncoderModel(__a )
__lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
__lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
__lowercase : Any = fx_state
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
"""simple docstring"""
__lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
__lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__a )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**__a )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__a )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
__lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
__lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
__lowercase : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(__a , __a , __a )
self.check_equivalence_flax_to_pt(__a , __a , __a )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
__lowercase : Dict = model_a(**inputs )
__lowercase : Any = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(tmp_dirname )
__lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(tmp_dirname )
__lowercase : Optional[int] = model_b(**inputs )
__lowercase : Tuple = after_outputs[0]
__lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(__a , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : int = 13
__lowercase : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : Tuple = random_attention_mask([batch_size, 4] )
__lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxViTModel(__a )
__lowercase : List[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = FlaxViTModelTester(self )
__lowercase : str = FlaxBertModelTester(self )
__lowercase : List[str] = vit_model_tester.prepare_config_and_inputs()
__lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Optional[int] = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : Tuple = 13
__lowercase : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : List[Any] = random_attention_mask([batch_size, 4] )
__lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = FlaxCLIPVisionModel(__a )
__lowercase : Optional[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = FlaxCLIPVisionModelTester(self )
__lowercase : Optional[Any] = FlaxBertModelTester(self )
__lowercase : Any = clip_model_tester.prepare_config_and_inputs()
__lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Dict = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
__lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase : Tuple = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=image , padding=True , return_tensors="""np""" )
__lowercase : Optional[int] = model(**inputs )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 ) )
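# For reference: logits_per_image has shape (num_images, num_texts) and
# logits_per_text is its transpose, which is what the two shape asserts above verify.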
| 306
| 0
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester :
'''simple docstring'''
def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = parent
__lowercase : int = batch_size
__lowercase : Any = seq_length
__lowercase : str = is_training
__lowercase : str = use_input_mask
__lowercase : Optional[int] = use_token_type_ids
__lowercase : List[Any] = use_labels
__lowercase : Optional[Any] = vocab_size
__lowercase : int = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Dict = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : List[str] = max_position_embeddings
__lowercase : Union[str, Any] = type_vocab_size
__lowercase : Dict = type_sequence_label_size
__lowercase : Union[str, Any] = initializer_range
__lowercase : List[Any] = num_labels
__lowercase : str = num_choices
__lowercase : Tuple = scope
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : str = None
__lowercase : Optional[Any] = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
__lowercase : str = model(__a , attention_mask=__a )
__lowercase : List[Any] = model(__a )
__lowercase : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
__lowercase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Any = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase : Any = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = False
_A : Any = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Optional[Any] = ()
_A : List[Any] = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Optional[Any] = True
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = EsmModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase : Union[str, Any] = type
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[str] = EsmModel.from_pretrained(model_name )
self.assertIsNotNone(model )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : List[str] = EsmEmbeddings(config=__a )
__lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
__lowercase : int = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : Optional[Any] = EsmEmbeddings(config=__a )
__lowercase : Optional[int] = torch.empty(2 , 4 , 30 )
__lowercase : Tuple = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
__lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
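# A minimal sketch of the position-id scheme exercised by the two embedding tests
# above, assuming the RoBERTa-style convention ESM inherits: padding positions keep
# padding_idx, real tokens get cumulative positions offset past it. (Standalone
# illustration, not the library implementation.)
import torch

def _position_ids_sketch(input_ids, padding_idx):
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1) * mask
    return incremental_indices.long() + padding_idx

# _position_ids_sketch(torch.tensor([[12, 31, 13, 1]]), padding_idx=1)
# -> tensor([[2, 3, 4, 1]])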
@require_torch
class lowerCAmelCase ( __a ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
__lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase : List[str] = model(__a )[0]
__lowercase : Union[str, Any] = 33
__lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
__lowercase : List[Any] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
__lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__lowercase : Any = model(__a )[0]
# compare the actual values for a slice.
__lowercase : int = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 364
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
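# Every guard above follows the same probe-and-fallback pattern. A self-contained
# sketch of the idea (generic helper, not part of this module):
import importlib
import importlib.util

def _optional_import_sketch(module_name):
    if importlib.util.find_spec(module_name) is None:
        return None  # caller would fall back to dummy placeholder objects
    return importlib.import_module(module_name)

# _optional_import_sketch("scipy")  # -> module if installed, else None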
| 306
| 0
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : List[Any] = logging.get_logger(__name__)
lowerCamelCase : Any = {
'''BAAI/AltCLIP''': '''https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json''',
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig ( PretrainedConfig ):
'''simple docstring'''
_A : List[Any] = '''altclip_text_model'''
def __init__( self , vocab_size=250002 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=514 , type_vocab_size=1 , initializer_range=0.02 , initializer_factor=0.02 , layer_norm_eps=1E-05 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , project_dim=768 , **kwargs , ):
"""simple docstring"""
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.project_dim = project_dim
class AltCLIPVisionConfig ( PretrainedConfig ):
'''simple docstring'''
_A : Any = '''altclip_vision_model'''
def __init__( self , hidden_size=768 , intermediate_size=3072 , projection_dim=512 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=32 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , initializer_factor=1.0 , **kwargs , ):
"""simple docstring"""
super().__init__(**kwargs )
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.projection_dim = projection_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.patch_size = patch_size
self.image_size = image_size
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.attention_dropout = attention_dropout
self.layer_norm_eps = layer_norm_eps
self.hidden_act = hidden_act
@classmethod
def from_pretrained ( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(kwargs )
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
config_dict = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(config_dict , **kwargs )
class AltCLIPConfig ( PretrainedConfig ):
'''simple docstring'''
_A : str = '''altclip'''
_A : str = True
def __init__( self , text_config=None , vision_config=None , projection_dim=768 , logit_scale_init_value=2.6592 , **kwargs ):
"""simple docstring"""
text_config_dict = kwargs.pop("""text_config_dict""" , None )
vision_config_dict = kwargs.pop("""vision_config_dict""" , None )
super().__init__(**kwargs )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
text_config = {}
# This is the complete result when using `text_config_dict`.
_text_config_dict = AltCLIPTextConfig(**text_config_dict ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
F"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
F"The value `text_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
message = (
F"`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
F"value `text_config[\"{key}\"]` will be overriden."
)
logger.warning(message )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
vision_config = {}
# This is the complete result when using `vision_config_dict`.
_vision_config_dict = AltCLIPVisionConfig(**vision_config_dict ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
_vision_config_dict["""id2label"""] = {
str(key ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
F"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
F"values. The value `vision_config_dict[\"{key}\"]` will be used instead."
)
# If inferred from default argument values (just to be super careful)
else:
message = (
F"`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
F"The value `vision_config[\"{key}\"]` will be overriden."
)
logger.warning(message )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
text_config = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
vision_config = {}
logger.info("""`vision_config` is `None`. Initializing the `AltCLIPVisionConfig` with default values.""" )
self.text_config = AltCLIPTextConfig(**text_config )
self.vision_config = AltCLIPVisionConfig(**vision_config )
self.projection_dim = projection_dim
self.logit_scale_init_value = logit_scale_init_value
self.initializer_factor = 1.0
@classmethod
def from_text_vision_configs ( cls , text_config : AltCLIPTextConfig , vision_config : AltCLIPVisionConfig , **kwargs ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
def to_dict ( self ):
"""simple docstring"""
output = copy.deepcopy(self.__dict__ )
output["""text_config"""] = self.text_config.to_dict()
output["""vision_config"""] = self.vision_config.to_dict()
output["""model_type"""] = self.__class__.model_type
return output
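# A short usage sketch for the composite config above (class and method names as
# defined in this file; default sizes per the two sub-config signatures):
# text_cfg = AltCLIPTextConfig()            # hidden_size defaults to 1024
# vision_cfg = AltCLIPVisionConfig()        # hidden_size defaults to 768
# clip_cfg = AltCLIPConfig.from_text_vision_configs(text_cfg, vision_cfg)
# clip_cfg.to_dict()["text_config"]["hidden_size"]  # -> 1024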
| 365
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 2_56
def get_min_hash ( tokens : List[str] ):
if len(tokens ) < MIN_NUM_TOKENS:
return None
min_hash = MinHash(num_perm=NUM_PERM )
for token in set(tokens ):
min_hash.update(token.encode() )
return min_hash
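# A tiny end-to-end sketch of the datasketch primitives used in this file: hash a
# token list, index it in an LSH structure, query for near-duplicates.
# (Illustrative keys and tokens; threshold/num_perm mirror the constants above.)
# lsh = MinHashLSH(threshold=0.85, num_perm=NUM_PERM)
# m = get_min_hash(["def", "hello", "print", "return"] * 3)  # >= MIN_NUM_TOKENS items
# lsh.insert("doc_a", m)
# lsh.query(m)  # -> ["doc_a"]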
def get_tokens ( code : str ):
return {t for t in NON_ALPHA.split(code ) if len(t.strip() ) > 0}
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , *,
duplication_jaccard_threshold : float = 0.85 , ) -> None:
"""simple docstring"""
self._duplication_jaccard_threshold = duplication_jaccard_threshold
self._num_perm = NUM_PERM
self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
self._duplicate_clusters = defaultdict(set )
def add ( self , code_key : Tuple , min_hash : MinHash ) -> None:
"""simple docstring"""
close_duplicates = self._index.query(min_hash )
if code_key in self._index.keys:
print(F"Duplicate key {code_key}" )
return
self._index.insert(code_key , min_hash )
if len(close_duplicates ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(code_key )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(code_key )
def get_duplicate_clusters ( self ) -> List[List[Dict]]:
"""simple docstring"""
duplicate_clusters = []
for base, duplicates in self._duplicate_clusters.items():
cluster = [base] + list(duplicates )
# reformat the cluster to be a list of dict
cluster = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(cluster )
return duplicate_clusters
def save ( self , filepath ) -> None:
"""simple docstring"""
duplicate_clusters = self.get_duplicate_clusters()
with open(filepath , """w""" ) as f:
json.dump(duplicate_clusters , f )
def _compute_min_hash ( element ):
index , data = element
min_hash = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter ( dataset_iterator : Type[Dataset] ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(dataset_iterator , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def make_duplicate_clusters ( dataset_iterator : Type[Dataset] , jaccard_threshold : float ):
di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator ) ) , max_queue_size=100 ) ):
di.add(filename , min_hash )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def jaccard_similarity ( code_a : str , code_b : str ):
tokens_a = get_tokens(code_a )
tokens_b = get_tokens(code_b )
return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
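# Example: token sets {a, b, c} and {b, c, d} share 2 of 4 distinct tokens, so
# jaccard_similarity returns 2 / 4 = 0.5 for code snippets tokenizing to those sets.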
_shared_dataset = None
def _find_cluster_extremes_shared ( cluster , jaccard_threshold ):
extremes = []
for element_a in cluster:
code_a = _shared_dataset[element_a["""base_index"""]]["""content"""]
for element_b in extremes:
code_b = _shared_dataset[element_b["""base_index"""]]["""content"""]
if jaccard_similarity(code_a , code_b ) >= jaccard_threshold:
element_b["copies"] += 1
break
else:
element_a["""copies"""] = 1
extremes.append(element_a )
return extremes
def find_extremes ( cluster_list , dataset , jaccard_threshold ):
global _shared_dataset
_shared_dataset = dataset
extremes_list = []
f = partial(_find_cluster_extremes_shared , jaccard_threshold=jaccard_threshold )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
f , cluster_list , ) , total=len(cluster_list ) , ):
extremes_list.append(extremes )
return extremes_list
def snake_case_ ( lowerCAmelCase_ : Type[Dataset] , lowerCAmelCase_ : float = 0.85 ):
__lowercase : Optional[int] = make_duplicate_clusters(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Tuple = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
__lowercase : int = {}
__lowercase : Dict = find_extremes(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for extremes in extremes_clusters:
for element in extremes:
__lowercase : Optional[Any] = element
__lowercase : int = duplicate_indices - set(extreme_dict.keys() )
__lowercase : int = dataset.filter(lambda lowerCAmelCase_ , lowerCAmelCase_ : idx not in remove_indices , with_indices=lowerCAmelCase_ )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__lowercase : List[str] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
__lowercase : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(F"Original dataset size: {len(lowerCAmelCase_ )}" )
print(F"Number of duplicate clusters: {len(lowerCAmelCase_ )}" )
print(F"Files in duplicate cluster: {len(lowerCAmelCase_ )}" )
print(F"Unique files in duplicate cluster: {len(lowerCAmelCase_ )}" )
print(F"Filtered dataset size: {len(lowerCAmelCase_ )}" )
return ds_filter, duplicate_clusters
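# --- Hedged sketch (illustrative only): the MinHash-based near-duplicate detection this
# pipeline relies on, shown with the `datasketch` library on toy documents. The num_perm
# value is an assumption; the 0.85 threshold matches the default jaccard_threshold above.
def _minhash_dedup_sketch():
    from datasketch import MinHash, MinHashLSH
    def minhash_of(text: str, num_perm: int = 256) -> MinHash:
        m = MinHash(num_perm=num_perm)
        for token in text.split():
            m.update(token.encode("utf8"))
        return m
    lsh = MinHashLSH(threshold=0.85, num_perm=256)
    lsh.insert("doc_a", minhash_of("the quick brown fox jumps over the lazy dog"))
    # An identical document hashes to the same sketch, so the query returns ["doc_a"].
    return lsh.query(minhash_of("the quick brown fox jumps over the lazy dog"))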
| 306
| 0
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] , __a : str , __a : Optional[Any]=13 , __a : str=30 , __a : Optional[int]=2 , __a : List[Any]=3 , __a : List[str]=True , __a : Optional[int]=True , __a : List[str]=32 , __a : str=5 , __a : Any=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : Any=0.1 , __a : Optional[Any]=0.1 , __a : Any=10 , __a : int=0.02 , __a : Optional[Any]=None , __a : List[str]=2 , ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[Any] = parent
__lowercase : Any = batch_size
__lowercase : Optional[int] = image_size
__lowercase : str = patch_size
__lowercase : str = num_channels
__lowercase : str = is_training
__lowercase : List[Any] = use_labels
__lowercase : int = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Optional[int] = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Tuple = hidden_dropout_prob
__lowercase : Dict = attention_probs_dropout_prob
__lowercase : List[str] = type_sequence_label_size
__lowercase : str = initializer_range
__lowercase : Union[str, Any] = scope
__lowercase : Any = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase : str = (image_size // patch_size) ** 2
__lowercase : int = num_patches + 1
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : Union[str, Any] = None
if self.use_labels:
__lowercase : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCAmelCase ( self : List[str] , __a : List[Any] , __a : List[str] , __a : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase : List[str] = ViTModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Optional[int] , __a : Tuple , __a : Dict , __a : Dict ) -> Any:
"""simple docstring"""
__lowercase : str = ViTForMaskedImageModeling(config=__a )
model.to(__a )
model.eval()
__lowercase : Dict = model(__a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__lowercase : Dict = 1
__lowercase : str = ViTForMaskedImageModeling(__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase : int = model(__a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Optional[Any] , __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = self.type_sequence_label_size
__lowercase : Optional[int] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase : Optional[Any] = 1
__lowercase : Optional[Any] = ViTForImageClassification(__a )
model.to(__a )
model.eval()
__lowercase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase : List[Any] = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase : int = config_and_inputs
__lowercase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_A : Optional[Any] = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
_A : Tuple = True
_A : Optional[int] = False
_A : Optional[int] = False
_A : Union[str, Any] = False
def lowerCAmelCase ( self : str ) -> int:
"""simple docstring"""
__lowercase : str = ViTModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Tuple = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowercase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : int = model_class(__a )
__lowercase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : Tuple = [*signature.parameters.keys()]
__lowercase : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : Optional[int] = ViTModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case_ ( ):
__lowercase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase : int = ViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" ).to(__a )
__lowercase : Optional[Any] = self.default_image_processor
__lowercase : Dict = prepare_img()
__lowercase : Dict = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
__lowercase : Any = model(**__a )
# verify the logits
__lowercase : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
__lowercase : Union[str, Any] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" ).to(__a )
__lowercase : Any = ViTImageProcessor.from_pretrained("""facebook/dino-vits8""" , size=480 )
__lowercase : int = prepare_img()
__lowercase : Tuple = image_processor(images=__a , return_tensors="""pt""" )
__lowercase : str = inputs.pixel_values.to(__a )
# forward pass
with torch.no_grad():
__lowercase : Optional[Any] = model(__a , interpolate_pos_encoding=__a )
# verify the logits
__lowercase : Dict = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , __a )
__lowercase : Tuple = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = ViTModel.from_pretrained("""facebook/dino-vits8""" , torch_dtype=torch.floataa , device_map="""auto""" )
__lowercase : int = self.default_image_processor
__lowercase : Dict = prepare_img()
__lowercase : List[Any] = image_processor(images=__a , return_tensors="""pt""" )
__lowercase : Optional[Any] = inputs.pixel_values.to(__a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
__lowercase : Optional[Any] = model(__a )
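# --- Hedged sketch (illustrative, not part of the test suite): the minimal classification
# path the integration test above exercises. The checkpoint name matches the one used in
# the tests; the fixture path assumes the script runs from the repository root.
def _vit_inference_sketch():
    import torch
    from PIL import Image
    from transformers import ViTForImageClassification, ViTImageProcessor
    processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
    model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # Map the argmax logit back to a human-readable ImageNet label.
    return model.config.id2label[logits.argmax(-1).item()]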
| 366
|
from ...processing_utils import ProcessorMixin
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : List[str] = ['''image_processor''', '''feature_extractor''']
_A : List[Any] = '''TvltImageProcessor'''
_A : Optional[int] = '''TvltFeatureExtractor'''
def __init__( self : str , __a : List[Any] , __a : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().__init__(image_processor=__a , feature_extractor=__a )
__lowercase : Union[str, Any] = image_processor
__lowercase : Tuple = feature_extractor
def __call__( self : Tuple , __a : Optional[int]=None , __a : Dict=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : Optional[Any]=False , __a : List[Any]=False , *__a : List[str] , **__a : List[Any] , ) -> Dict:
"""simple docstring"""
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
__lowercase : Tuple = None
if images is not None:
__lowercase : Any = self.image_processor(__a , mask_pixel=__a , *__a , **__a )
if images_mixed is not None:
__lowercase : Union[str, Any] = self.image_processor(__a , is_mixed=__a , *__a , **__a )
if audio is not None:
__lowercase : Optional[Any] = self.feature_extractor(
__a , *__a , sampling_rate=__a , mask_audio=__a , **__a )
__lowercase : Tuple = {}
if audio is not None:
output_dict.update(__a )
if images is not None:
output_dict.update(__a )
if images_mixed_dict is not None:
output_dict.update(__a )
return output_dict
@property
def lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : int = self.image_processor.model_input_names
__lowercase : Union[str, Any] = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
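# --- Hedged sketch: `dict.fromkeys` above is an order-preserving de-duplication that
# merges the two components' input names without repeating shared keys. The example
# names below are illustrative, not the actual TVLT input names.
_image_names = ["pixel_values", "pixel_mask"]
_audio_names = ["audio_values", "audio_mask", "pixel_mask"]
assert list(dict.fromkeys(_image_names + _audio_names)) == [
    "pixel_values",
    "pixel_mask",
    "audio_values",
    "audio_mask",
]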
| 306
| 0
|
from collections.abc import Sequence
def snake_case_ ( lowerCAmelCase_ : Sequence[int] | None = None ):
if nums is None or not nums:
raise ValueError("""Input sequence should not be empty""" )
__lowercase : str = nums[0]
for i in range(1 , len(lowerCAmelCase_ ) ):
__lowercase : Dict = nums[i]
__lowercase : List[Any] = max(lowerCAmelCase_ , ans + num , lowerCAmelCase_ )
return ans
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowerCamelCase : Optional[Any] = int(input('''Enter number of elements : ''').strip())
lowerCamelCase : Optional[Any] = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n]
print(max_subsequence_sum(array))
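# --- Hedged sketch (self-contained): a compact Kadane's algorithm equivalent to the loop
# above, with spot checks. `_kadane` is an illustrative helper, not part of the original file.
def _kadane(nums):
    best = current = nums[0]
    for num in nums[1:]:
        current = max(num, current + num)  # either extend the current run or restart at num
        best = max(best, current)
    return best
assert _kadane([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6  # best subarray is [4, -1, 2, 1]
assert _kadane([-3, -2, -1]) == -1  # all-negative input keeps the largest single element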
| 367
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Any , __a : Tuple , __a : Optional[int]=13 , __a : int=7 , __a : List[str]=False , __a : Optional[int]=True , __a : Optional[int]=False , __a : Dict=True , __a : Optional[int]=33 , __a : Dict=32 , __a : Optional[int]=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : List[str]=0.1 , __a : Dict=0.1 , __a : List[Any]=512 , __a : Any=16 , __a : Optional[Any]=2 , __a : List[Any]=0.02 , __a : int=3 , __a : Union[str, Any]=4 , __a : Optional[int]=None , ) -> Optional[int]:
"""simple docstring"""
__lowercase : Tuple = parent
__lowercase : int = batch_size
__lowercase : Any = seq_length
__lowercase : str = is_training
__lowercase : str = use_input_mask
__lowercase : Optional[int] = use_token_type_ids
__lowercase : List[Any] = use_labels
__lowercase : Optional[Any] = vocab_size
__lowercase : int = hidden_size
__lowercase : List[Any] = num_hidden_layers
__lowercase : Dict = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : List[Any] = attention_probs_dropout_prob
__lowercase : List[str] = max_position_embeddings
__lowercase : Union[str, Any] = type_vocab_size
__lowercase : Dict = type_sequence_label_size
__lowercase : Union[str, Any] = initializer_range
__lowercase : List[Any] = num_labels
__lowercase : str = num_choices
__lowercase : Tuple = scope
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : str = None
__lowercase : Optional[Any] = None
__lowercase : Tuple = None
if self.use_labels:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase ( self : List[Any] , __a : int , __a : int , __a : Dict , __a : Union[str, Any] , __a : List[str] , __a : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = EsmModel(config=__a )
model.to(__a )
model.eval()
__lowercase : str = model(__a , attention_mask=__a )
__lowercase : List[Any] = model(__a )
__lowercase : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __a : Dict , __a : List[Any] , __a : Tuple , __a : Union[str, Any] , __a : str , __a : Union[str, Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[str] = EsmForMaskedLM(config=__a )
model.to(__a )
model.eval()
__lowercase : int = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[int] , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : Tuple , __a : Optional[int] , __a : Tuple ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = self.num_labels
__lowercase : Any = EsmForTokenClassification(config=__a )
model.to(__a )
model.eval()
__lowercase : Optional[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__lowercase : Any = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase : List[str] = config_and_inputs
__lowercase : Any = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase ( __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Optional[Any] = False
_A : Any = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Optional[Any] = ()
_A : List[Any] = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Optional[Any] = True
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = EsmModelTester(self )
__lowercase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__lowercase : Union[str, Any] = type
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[str] = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : List[str] = EsmEmbeddings(config=__a )
__lowercase : Union[str, Any] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
__lowercase : int = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__lowercase : str = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def lowerCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
__lowercase : Optional[Any] = EsmEmbeddings(config=__a )
__lowercase : Optional[int] = torch.empty(2 , 4 , 30 )
__lowercase : Tuple = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__lowercase : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] )
__lowercase : Any = embeddings.create_position_ids_from_inputs_embeds(__a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
def lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
pass
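# --- Hedged sketch (illustrative): the position-id scheme the tests above verify, written
# out directly. Padding positions keep padding_idx; real tokens are numbered from
# padding_idx + 1 onwards, in order.
def _position_ids_sketch():
    import torch
    padding_idx = 1
    input_ids = torch.as_tensor([[12, 31, 13, padding_idx]])
    mask = input_ids.ne(padding_idx).int()
    position_ids = (torch.cumsum(mask, dim=1) * mask + padding_idx).long()
    assert torch.equal(position_ids, torch.as_tensor([[2, 3, 4, 1]]))
    return position_ids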
@require_torch
class lowerCAmelCase ( __a ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
__lowercase : Tuple = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__lowercase : List[str] = model(__a )[0]
__lowercase : Union[str, Any] = 33
__lowercase : Union[str, Any] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , __a )
__lowercase : List[Any] = torch.tensor(
[[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
@slow
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
__lowercase : int = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
model.eval()
__lowercase : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__lowercase : Any = model(__a )[0]
# compare the actual values for a slice.
__lowercase : int = torch.tensor(
[[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
| 306
| 0
|
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
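# --- Hedged sketch (generic pattern, not diffusers code): the same optional-dependency
# guard written with importlib so it runs anywhere. A missing backend is replaced by a
# stub that only fails when it is actually used, so importing the package never crashes.
import importlib.util
def _optional_import(name: str):
    if importlib.util.find_spec(name) is not None:
        return importlib.import_module(name)
    class _Missing:
        def __getattr__(self, attr):
            raise ImportError(f"{name} is required for this feature; install it first")
    return _Missing()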
| 368
|
def snake_case_ ( lowerCAmelCase_ : int ):
__lowercase : int = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def snake_case_ ( lowerCAmelCase_ : int = 5000 ):
__lowercase : Optional[int] = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCAmelCase_ )]
for i, pentagonal_i in enumerate(lowerCAmelCase_ ):
for j in range(lowerCAmelCase_ , len(lowerCAmelCase_ ) ):
__lowercase : int = pentagonal_nums[j]
__lowercase : Optional[int] = pentagonal_i + pentagonal_j
__lowercase : Union[str, Any] = pentagonal_j - pentagonal_i
if is_pentagonal(lowerCAmelCase_ ) and is_pentagonal(lowerCAmelCase_ ):
return b
return -1
if __name__ == "__main__":
print(f'''{solution() = }''')
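# --- Hedged sketch: the check above inverts P(n) = n(3n - 1) / 2. Solving the quadratic
# 3n^2 - n - 2x = 0 for n gives n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal exactly
# when that expression is a positive integer -- which is what `((1 + root) / 6) % 1 == 0` tests.
_pentagonals = [n * (3 * n - 1) // 2 for n in range(1, 8)]  # 1, 5, 12, 22, 35, 51, 70
assert all(((1 + (1 + 24 * p) ** 0.5) / 6) % 1 == 0 for p in _pentagonals)
assert not any(((1 + (1 + 24 * q) ** 0.5) / 6) % 1 == 0 for q in (2, 3, 4, 6, 7))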
| 306
| 0
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCAmelCase ( __a ):
'''simple docstring'''
@require_torch
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Optional[int] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
__lowercase : int = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
__lowercase : List[Any] = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
__lowercase : Optional[Any] = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(__a )
BertModel.from_pretrained(__a )
BertTokenizer.from_pretrained(__a )
pipeline(task="""fill-mask""" , model=__a )
# baseline - just load from_pretrained with normal network
__lowercase : str = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
__lowercase : Optional[int] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__lowercase : Dict = """1"""
__lowercase : List[str] = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Any = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
__lowercase : Optional[Any] = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
__lowercase : int = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
__lowercase : List[str] = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(__a )
BertModel.from_pretrained(__a )
BertTokenizer.from_pretrained(__a )
pipeline(task="""fill-mask""" , model=__a )
# baseline - just load from_pretrained with normal network
__lowercase : str = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
__lowercase : Optional[int] = self.get_env()
__lowercase : Tuple = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase : Tuple = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
__lowercase : int = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
__lowercase : str = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
__lowercase : str = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
__lowercase : List[str] = self.get_env()
__lowercase : Any = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
__lowercase : List[str] = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__lowercase : Dict = """1"""
__lowercase : List[str] = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase : str = """
from transformers import pipeline
"""
__lowercase : List[str] = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
__lowercase : List[str] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
__lowercase : str = self.get_env()
__lowercase : str = """1"""
__lowercase : Tuple = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
__lowercase : Optional[int] = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase : List[str] = """
from transformers import AutoModel
"""
__lowercase : Any = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
__lowercase : Any = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
__lowercase : Dict = self.get_env()
__lowercase : Dict = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
__lowercase : List[str] = """1"""
__lowercase : Any = subprocess.run(__a , env=__a , check=__a , capture_output=__a )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
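# --- Hedged sketch: the environment toggle these tests exercise. TRANSFORMERS_OFFLINE=1
# makes from_pretrained resolve everything from the local cache instead of the Hub; the
# subprocess pattern below mirrors how the tests isolate that behaviour per process.
def _offline_env_sketch():
    import os
    import subprocess
    import sys
    env = dict(os.environ, TRANSFORMERS_OFFLINE="1")
    probe = "import os; print(os.environ['TRANSFORMERS_OFFLINE'])"
    result = subprocess.run([sys.executable, "-c", probe], env=env, capture_output=True, check=True)
    assert result.stdout.decode().strip() == "1"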
| 369
|
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[Any] = (DPMSolverSDEScheduler,)
_A : Dict = 10
def lowerCAmelCase ( self : Optional[int] , **__a : Dict ) -> Optional[int]:
"""simple docstring"""
__lowercase : Any = {
"""num_train_timesteps""": 1100,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**__a )
return config
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=__a )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[int] = self.scheduler_classes[0]
__lowercase : List[str] = self.get_scheduler_config()
__lowercase : Any = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase : Optional[Any] = self.dummy_model()
__lowercase : str = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase : Optional[Any] = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowercase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
__lowercase : Optional[Any] = model(__a , __a )
__lowercase : Optional[Any] = scheduler.step(__a , __a , __a )
__lowercase : str = output.prev_sample
__lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
__lowercase : Union[str, Any] = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47821044921875 ) < 1E-2
assert abs(result_mean.item() - 0.2178705964565277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59352111816406 ) < 1E-2
assert abs(result_mean.item() - 0.22342906892299652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = self.scheduler_classes[0]
__lowercase : Dict = self.get_scheduler_config(prediction_type="""v_prediction""" )
__lowercase : int = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
__lowercase : Optional[int] = self.dummy_model()
__lowercase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
__lowercase : Dict = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
__lowercase : Dict = scheduler.scale_model_input(__a , __a )
__lowercase : Optional[int] = model(__a , __a )
__lowercase : Optional[int] = scheduler.step(__a , __a , __a )
__lowercase : int = output.prev_sample
__lowercase : Optional[Any] = torch.sum(torch.abs(__a ) )
__lowercase : List[str] = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77149200439453 ) < 1E-2
assert abs(result_mean.item() - 0.16226289014816284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1663360595703 ) < 1E-2
assert abs(result_mean.item() - 0.16688326001167297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8487548828125 ) < 1E-2
assert abs(result_mean.item() - 0.1560530662536621 ) < 1E-3
def lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = self.scheduler_classes[0]
__lowercase : Dict = self.get_scheduler_config()
__lowercase : Optional[int] = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
__lowercase : int = self.dummy_model()
__lowercase : Optional[Any] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__lowercase : int = scheduler.scale_model_input(__a , __a )
__lowercase : List[str] = model(__a , __a )
__lowercase : List[str] = scheduler.step(__a , __a , __a )
__lowercase : int = output.prev_sample
__lowercase : List[Any] = torch.sum(torch.abs(__a ) )
__lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46957397460938 ) < 1E-2
assert abs(result_mean.item() - 0.21805934607982635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59353637695312 ) < 1E-2
assert abs(result_mean.item() - 0.22342908382415771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52383422851562 ) < 1E-2
assert abs(result_mean.item() - 0.211619570851326 ) < 1E-3
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase : str = self.scheduler_classes[0]
__lowercase : List[Any] = self.get_scheduler_config()
__lowercase : Tuple = scheduler_class(**__a , use_karras_sigmas=__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
__lowercase : List[str] = self.dummy_model()
__lowercase : Optional[int] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
__lowercase : str = sample.to(__a )
for t in scheduler.timesteps:
__lowercase : List[Any] = scheduler.scale_model_input(__a , __a )
__lowercase : Optional[Any] = model(__a , __a )
__lowercase : Any = scheduler.step(__a , __a , __a )
__lowercase : Optional[Any] = output.prev_sample
__lowercase : Any = torch.sum(torch.abs(__a ) )
__lowercase : Optional[Any] = torch.mean(torch.abs(__a ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66974135742188 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63653564453125 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3135223388672 ) < 1E-2
assert abs(result_mean.item() - 0.23003872730981811 ) < 1E-2
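# --- Hedged sketch (requires torchsde): the generic denoising loop the full-loop tests
# above follow, with a stand-in "model" so the control flow is visible without real weights.
def _sde_loop_sketch(num_inference_steps: int = 10):
    scheduler = DPMSolverSDEScheduler(noise_sampler_seed=0)
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for a trained UNet's output
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample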
| 306
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
lowerCamelCase : Dict = False
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
return 12
@property
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return 12
@property
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return 32
@property
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : Union[str, Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
return model
@property
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(__a )
@property
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
__lowercase : Any = 12
__lowercase : Any = 12
__lowercase : Dict = {
"""attention_bias""": True,
"""cross_attention_dim""": 32,
"""attention_head_dim""": height * width,
"""num_attention_heads""": 1,
"""num_vector_embeds""": self.num_embed,
"""num_embeds_ada_norm""": self.num_embeds_ada_norm,
"""norm_num_groups""": 32,
"""sample_size""": width,
"""activation_fn""": """geglu-approximate""",
}
__lowercase : int = TransformeraDModel(**__a )
return model
def lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
__lowercase : List[str] = """cpu"""
__lowercase : str = self.dummy_vqvae
__lowercase : Dict = self.dummy_text_encoder
__lowercase : str = self.dummy_tokenizer
__lowercase : Union[str, Any] = self.dummy_transformer
__lowercase : Optional[Any] = VQDiffusionScheduler(self.num_embed )
__lowercase : List[Any] = LearnedClassifierFreeSamplingEmbeddings(learnable=__a )
__lowercase : int = VQDiffusionPipeline(
vqvae=__a , text_encoder=__a , tokenizer=__a , transformer=__a , scheduler=__a , learned_classifier_free_sampling_embeddings=__a , )
__lowercase : Union[str, Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Dict = """teddy bear playing in the pool"""
__lowercase : List[str] = torch.Generator(device=__a ).manual_seed(0 )
__lowercase : Tuple = pipe([prompt] , generator=__a , num_inference_steps=2 , output_type="""np""" )
__lowercase : Optional[Any] = output.images
__lowercase : List[Any] = torch.Generator(device=__a ).manual_seed(0 )
__lowercase : int = pipe(
[prompt] , generator=__a , output_type="""np""" , return_dict=__a , num_inference_steps=2 )[0]
__lowercase : Optional[Any] = image[0, -3:, -3:, -1]
__lowercase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase : str = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase : int = """cpu"""
__lowercase : Dict = self.dummy_vqvae
__lowercase : List[Any] = self.dummy_text_encoder
__lowercase : Dict = self.dummy_tokenizer
__lowercase : Dict = self.dummy_transformer
__lowercase : int = VQDiffusionScheduler(self.num_embed )
__lowercase : int = LearnedClassifierFreeSamplingEmbeddings(
learnable=__a , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
__lowercase : List[str] = VQDiffusionPipeline(
vqvae=__a , text_encoder=__a , tokenizer=__a , transformer=__a , scheduler=__a , learned_classifier_free_sampling_embeddings=__a , )
__lowercase : Optional[Any] = pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__lowercase : Tuple = """teddy bear playing in the pool"""
__lowercase : List[Any] = torch.Generator(device=__a ).manual_seed(0 )
__lowercase : int = pipe([prompt] , generator=__a , num_inference_steps=2 , output_type="""np""" )
__lowercase : Any = output.images
__lowercase : Optional[int] = torch.Generator(device=__a ).manual_seed(0 )
__lowercase : Any = pipe(
[prompt] , generator=__a , output_type="""np""" , return_dict=__a , num_inference_steps=2 )[0]
__lowercase : List[str] = image[0, -3:, -3:, -1]
__lowercase : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 24, 24, 3)
__lowercase : int = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[int] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" )
__lowercase : Dict = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" )
__lowercase : Any = pipeline.to(__a )
pipeline.set_progress_bar_config(disable=__a )
# requires GPU generator for gumbel softmax
# don't use GPU generator in tests though
__lowercase : Optional[Any] = torch.Generator(device=__a ).manual_seed(0 )
__lowercase : Union[str, Any] = pipeline(
"""teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=__a , output_type="""np""" , )
__lowercase : List[Any] = output.images[0]
assert image.shape == (256, 256, 3)
assert np.abs(expected_image - image ).max() < 2.0
| 370
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
lowerCamelCase : str = trt.Logger(trt.Logger.WARNING)
lowerCamelCase : Any = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
lowerCamelCase : Optional[Any] = logging.getLogger(__name__)
lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=3_84,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=1_28,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
lowerCamelCase : Dict = parser.parse_args()
if args.tokenizer_name:
lowerCamelCase : str = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
lowerCamelCase : List[str] = args.per_device_eval_batch_size
lowerCamelCase : Any = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
lowerCamelCase : List[str] = True
lowerCamelCase : List[Any] = '''temp_engine/bert-fp32.engine'''
if args.fpaa:
lowerCamelCase : Optional[Any] = '''temp_engine/bert-fp16.engine'''
if args.inta:
lowerCamelCase : int = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
lowerCamelCase : int = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
lowerCamelCase : Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
lowerCamelCase : Dict = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCamelCase : List[str] = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowerCamelCase : Optional[int] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCamelCase : Optional[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Tuple ):
__lowercase : List[str] = np.asarray(inputs["""input_ids"""] , dtype=np.intaa )
__lowercase : Union[str, Any] = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa )
__lowercase : int = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCAmelCase_ )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCAmelCase_ )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCAmelCase_ )
# start time
__lowercase : Optional[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCAmelCase_ ) for d_inp in d_inputs] + [int(lowerCAmelCase_ ), int(lowerCAmelCase_ )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
cuda.memcpy_dtoh_async(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Synchronize the stream and take time
stream.synchronize()
# end time
__lowercase : int = time.time()
__lowercase : Union[str, Any] = end_time - start_time
__lowercase : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
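# --- Hedged sketch (requires a CUDA device and pycuda, with the context created by
# pycuda.autoinit above): the host<->device round trip the function above performs,
# reduced to a single array so the memcpy pattern is explicit.
def _pycuda_roundtrip_sketch():
    host_in = np.arange(16, dtype=np.float32)
    device_buf = cuda.mem_alloc(host_in.nbytes)  # allocate device memory
    cuda.memcpy_htod(device_buf, host_in)  # copy host -> device
    host_out = np.empty_like(host_in)
    cuda.memcpy_dtoh(host_out, device_buf)  # copy device -> host
    assert np.array_equal(host_in, host_out)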
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
lowerCamelCase : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCamelCase : List[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
lowerCamelCase : Optional[Any] = raw_datasets['''validation'''].column_names
lowerCamelCase : Union[str, Any] = '''question''' if '''question''' in column_names else column_names[0]
lowerCamelCase : str = '''context''' if '''context''' in column_names else column_names[1]
lowerCamelCase : Dict = '''answers''' if '''answers''' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
lowerCamelCase : Dict = tokenizer.padding_side == '''right'''
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
lowerCamelCase : Tuple = min(args.max_seq_length, tokenizer.model_max_length)
def snake_case_ ( lowerCAmelCase_ : int ):
# Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
# left whitespace
__lowercase : str = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
__lowercase : List[str] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCAmelCase_ , stride=args.doc_stride , return_overflowing_tokens=lowerCAmelCase_ , return_offsets_mapping=lowerCAmelCase_ , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__lowercase : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__lowercase : Any = []
for i in range(len(tokenized_examples["""input_ids"""] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__lowercase : Dict = tokenized_examples.sequence_ids(lowerCAmelCase_ )
__lowercase : List[Any] = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__lowercase : List[str] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__lowercase : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i] )
]
return tokenized_examples
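    # Illustration (hypothetical numbers, not from this script): with max_seq_length=384 and
    # doc_stride=128, a 900-token context produces several overlapping features for a single
    # example, and overflow_to_sample_mapping would read e.g. [0, 0, 0], mapping every feature
    # back to example 0 so predictions can later be regrouped per example.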
    eval_examples = raw_datasets["validation"]
    # Validation Feature Creation
    eval_dataset = eval_examples.map(
        prepare_validation_features,
        batched=True,
        num_proc=args.preprocessing_num_workers,
        remove_columns=column_names,
        load_from_cache_file=not args.overwrite_cache,
        desc="Running tokenizer on validation dataset",
    )

    data_collator = default_data_collator

    eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
    eval_dataloader = DataLoader(
        eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
    )
    def post_processing_function(examples, features, predictions, stage="eval"):
        # Post-processing: we match the start logits and end logits to answers in the original context.
        predictions = postprocess_qa_predictions(
            examples=examples,
            features=features,
            predictions=predictions,
            version_2_with_negative=args.version_2_with_negative,
            n_best_size=args.n_best_size,
            max_answer_length=args.max_answer_length,
            null_score_diff_threshold=args.null_score_diff_threshold,
            output_dir=args.output_dir,
            prefix=stage,
        )
        # Format the result to the format the metric expects.
        if args.version_2_with_negative:
            formatted_predictions = [
                {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
            ]
        else:
            formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

        references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)
    metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
        # setup for TRT inference
        for i in range(len(input_names)):
            context.set_binding_shape(i, INPUT_SHAPE)
        assert context.all_binding_shapes_specified

        def binding_nbytes(binding):
            return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
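        # For example, assuming a binding of shape (1, 384) with dtype int32 (an assumption --
        # the real shapes come from the engine): trt.volume((1, 384)) * 4 = 1536 bytes.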
# Allocate device memory for inputs and outputs.
        # Allocate device memory for inputs and outputs.
        d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

        # Allocate host buffers for the two outputs (start logits and end logits) and matching device memory.
        h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
        h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
        d_output0 = cuda.mem_alloc(h_output0.nbytes)
        d_output1 = cuda.mem_alloc(h_output1.nbytes)

        # Create a stream in which to copy inputs/outputs and run inference.
        stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
        total_time = 0.0
        niter = 0
        start_time = timeit.default_timer()

        all_preds = None
        for step, batch in enumerate(eval_dataloader):
            outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
            total_time += infer_time
            niter += 1

            start_logits, end_logits = outputs
            start_logits = torch.tensor(start_logits)
            end_logits = torch.tensor(end_logits)

            # necessary to pad predictions and labels for being gathered
            start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
            end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

            logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
            all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
        if all_preds is not None:
            all_preds = nested_truncate(all_preds, len(eval_dataset))

        evalTime = timeit.default_timer() - start_time
        logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
        # Inference time from TRT
        logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
        logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
        logger.info("Total Number of Inference = %d", niter)

        prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
        eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
        logger.info(f"Evaluation metrics: {eval_metric}")
| 306
| 0
|
"""simple docstring"""
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # Apply the gaussian function to each element of the matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
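# Note: this evaluates the zero-mean Gaussian density exp(-x**2 / (2 * variance)) / (sigma * sqrt(2 * pi))
# elementwise over a matrix, which is why it can be reused for both the spatial and the
# intensity weights below.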
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
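# Illustration (not part of the original file): for kernel_size = 3 the distance matrix
# passed to vec_gaussian is
#   [[sqrt(2), 1, sqrt(2)],
#    [      1, 0,       1],
#    [sqrt(2), 1, sqrt(2)]]
# so the center pixel always receives the largest spatial weight.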
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2
def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size += abs(kernel_size % 2 - 1)  # round up to the nearest odd size
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
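# Example invocation (hypothetical script name, path and values):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
# i.e. <image path> <spatial variance> <intensity variance> <kernel size>.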
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
| 371
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : str = {
'''facebook/nllb-moe-54B''': '''https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json''',
}
class NllbMoeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=128112, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.05, decoder_layerdrop=0.05, use_cache=True, is_encoder_decoder=True, activation_function="relu", d_model=1024, dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=2, scale_embedding=True, router_bias=False, router_dtype="float32", router_ignore_padding_tokens=False, num_experts=128, expert_capacity=64, encoder_sparse_step=4, decoder_sparse_step=4, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, second_expert_policy="all", normalize_router_prob_before_dropping=False, batch_prioritized_routing=False, moe_eval_capacity_token_fraction=1.0, moe_token_dropout=0.2, pad_token_id=1, bos_token_id=0, eos_token_id=2, output_router_logits=False, **kwargs) -> None:
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 306
| 0
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase : List[str] = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
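# Migration sketch (illustrative; the checkpoint name is only an example):
#   from transformers import MobileViTImageProcessor
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")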
| 350
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_poolformer''': [
'''POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''PoolFormerConfig''',
'''PoolFormerOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_poolformer"] = [
'''POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PoolFormerForImageClassification''',
'''PoolFormerModel''',
'''PoolFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 306
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self) -> None:
        """simple docstring"""
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count=1):
        """simple docstring"""
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        """simple docstring"""
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self) -> None:
        """simple docstring"""
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 351
|
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
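    # Quick sanity check (not part of the original file): 360 = 2**3 * 3**2 * 5.
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]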
| 306
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_output_embeds_base_model(self) -> None:
        """simple docstring"""
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 306
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_flatten_dict(self):
        """simple docstring"""
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        """simple docstring"""
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        """simple docstring"""
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 353
|
def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]
if __name__ == "__main__":
import doctest
doctest.testmod()
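    # Worked example (sanity check, not in the original file): "daBcd" matches "ABC"
    # by capitalizing 'a' and 'c' and deleting the remaining lowercase characters.
    assert abbr("daBcd", "ABC") is True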
| 306
| 0
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowerCamelCase : List[str] = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    # Pre-process datasets of tuples (story, 1st continuation, 2nd continuation, label) into
    # Transformer inputs of shape (n_batch, 2, input_len).
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
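# Shape sketch (inferred from the arrays above, not part of the original file): each dataset
# becomes input_ids (n_batch, 2, input_len), mc_token_ids (n_batch, 2), lm_labels
# (n_batch, 2, input_len) and mc_labels (n_batch,); the size-2 axis holds the two candidate
# continuations of each story.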
def snake_case_ ( ):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)
# Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object."""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
logger.info("""Encoding dataset...""" )
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)
# Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )
    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])
    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)
    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 354
|
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
'''simple docstring'''
    def _info(self):
        """simple docstring"""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """simple docstring"""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 306
| 0
|
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
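# Worked example (not part of the original file): searching for 67 in
# [10, 30, 40, 45, 50, 66, 77, 93] first probes
# point = 0 + (67 - 10) * (7 - 0) // (93 - 10) = 4, i.e. the value 50,
# and then continues with left = 5 because 67 > 50.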
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
import sys
    # the sample collection is defined unconditionally so the demo below actually runs
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit('''Sequence must be ascending sorted to apply interpolation search''')

    target = 67
    result = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print('''Not found''')
| 355
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False
def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
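# Illustration (not part of the original file): for pattern "ABABX" the failure array is
# [0, 0, 1, 2, 0] -- entry j is the length of the longest proper prefix of the pattern
# that is also a suffix of pattern[: j + 1].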
if __name__ == "__main__":
# Test 1)
    pattern = '''abc1abc12'''
    text1 = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
    text2 = '''alskfjaldsk23adsfabcabc'''
    assert kmp(pattern, text1) and not kmp(pattern, text2)
# Test 2)
    pattern = '''ABABX'''
    text = '''ABABZABABYABABX'''
    assert kmp(pattern, text)

    # Test 3)
    pattern = '''AAAB'''
    text = '''ABAAAAAB'''
    assert kmp(pattern, text)

    # Test 4)
    pattern = '''abcdabcy'''
    text = '''abcxabcdabxabcdabcdabcy'''
    assert kmp(pattern, text)

    # Test 5)
    pattern = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 306
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_mobilenet_v2''': [
'''MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileNetV2Config''',
'''MobileNetV2OnnxConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
'''MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileNetV2ForImageClassification''',
'''MobileNetV2ForSemanticSegmentation''',
'''MobileNetV2Model''',
'''MobileNetV2PreTrainedModel''',
'''load_tf_weights_in_mobilenet_v2''',
]
if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 356
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def lowerCAmelCase ( self : Optional[int] , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : List[Any] , ) -> PIL.Image.Image:
"""simple docstring"""
__lowercase : List[Any] = do_resize if do_resize is not None else self.do_resize
__lowercase : Dict = size if size is not None else self.size
__lowercase : Tuple = get_size_dict(__a , param_name="""size""" , default_to_square=__a )
__lowercase : int = resample if resample is not None else self.resample
__lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowercase : List[Any] = crop_size if crop_size is not None else self.crop_size
__lowercase : List[str] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a )
__lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__lowercase : str = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize
__lowercase : Tuple = image_mean if image_mean is not None else self.image_mean
__lowercase : str = image_std if image_std is not None else self.image_std
__lowercase : str = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__lowercase : Union[str, Any] = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__lowercase : Union[str, Any] = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
__lowercase : Any = [to_numpy_array(__a ) for image in images]
if do_resize:
__lowercase : str = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_center_crop:
__lowercase : str = [self.center_crop(image=__a , size=__a ) for image in images]
if do_rescale:
__lowercase : Dict = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
__lowercase : Optional[Any] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
__lowercase : Any = [to_channel_dimension_format(__a , __a ) for image in images]
__lowercase : Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=__a , tensor_type=__a )
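# A minimal usage sketch (hypothetical names): assuming `image_processor` is an
# instance of this class and `image` is a PIL.Image, the pipeline above reduces
# to a single call:
#
#   inputs = image_processor(images=image, return_tensors="pt")
#   pixel_values = inputs["pixel_values"]  # resized, cropped, rescaled, normalized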
| 306
| 0
|
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Dict ):
return params[F"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int="attention" ):
__lowercase : Any = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
__lowercase : str = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__lowercase : Optional[Any] = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
__lowercase : List[Any] = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__lowercase : int = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
__lowercase : List[Any] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__lowercase : int = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
__lowercase : str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
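    # T5X stores the k/q/v kernels as (d_model, n_heads, head_dim); the reshapes
    # above fold the head dimensions into single 2-D projection matrices, while
    # `out` is stored as (n_heads, head_dim, d_model) and folds into
    # (n_heads * head_dim, d_model).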
return k, o, q, v
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[int]=False ):
if split_mlp_wi:
__lowercase : str = params[F"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
__lowercase : int = params[F"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
__lowercase : Optional[Any] = (wi_a, wi_a)
else:
__lowercase : Any = params[F"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
__lowercase : Optional[Any] = params[F"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] ):
return params[F"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def snake_case_ ( lowerCAmelCase_ : dict , *, lowerCAmelCase_ : int , lowerCAmelCase_ : bool , lowerCAmelCase_ : bool = False ):
__lowercase : Any = traverse_util.flatten_dict(variables["""target"""] )
__lowercase : str = {"""/""".join(lowerCAmelCase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__lowercase : Optional[int] = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , lowerCAmelCase_ )
__lowercase : str = collections.OrderedDict()
# Shared embeddings.
__lowercase : Optional[int] = old["""token_embedder/embedding"""]
# Encoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
__lowercase : Optional[int] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """encoder""" , """pre_attention_layer_norm""" )
__lowercase : str = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """encoder""" , """attention""" )
__lowercase : Any = layer_norm
__lowercase : List[Any] = k.T
__lowercase : Tuple = o.T
__lowercase : Tuple = q.T
__lowercase : Optional[Any] = v.T
# Block i, layer 1 (MLP).
__lowercase : List[str] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """encoder""" , """pre_mlp_layer_norm""" )
__lowercase : Union[str, Any] = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """encoder""" , lowerCAmelCase_ )
__lowercase : List[Any] = layer_norm
if split_mlp_wi:
__lowercase : Any = wi[0].T
__lowercase : List[str] = wi[1].T
else:
__lowercase : str = wi.T
__lowercase : Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__lowercase : Optional[int] = tax_relpos_bias_lookup(
lowerCAmelCase_ , lowerCAmelCase_ , """encoder""" ).T
__lowercase : Optional[int] = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
__lowercase : Any = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , """encoder""" ).T
__lowercase : List[Any] = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
__lowercase : Any = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """decoder""" , """pre_self_attention_layer_norm""" )
__lowercase : List[str] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """decoder""" , """self_attention""" )
__lowercase : Union[str, Any] = layer_norm
__lowercase : List[Any] = k.T
__lowercase : List[str] = o.T
__lowercase : int = q.T
__lowercase : Dict = v.T
# Block i, layer 1 (Cross Attention).
__lowercase : Tuple = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """decoder""" , """pre_cross_attention_layer_norm""" )
__lowercase : str = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """decoder""" , """encoder_decoder_attention""" )
__lowercase : int = layer_norm
__lowercase : Optional[Any] = k.T
__lowercase : Optional[int] = o.T
__lowercase : List[Any] = q.T
__lowercase : Optional[Any] = v.T
# Block i, layer 2 (MLP).
__lowercase : Dict = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """decoder""" , """pre_mlp_layer_norm""" )
__lowercase : Union[str, Any] = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """decoder""" , lowerCAmelCase_ )
__lowercase : List[str] = layer_norm
if split_mlp_wi:
__lowercase : Dict = wi[0].T
__lowercase : Optional[Any] = wi[1].T
else:
__lowercase : Dict = wi.T
__lowercase : Optional[int] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__lowercase : int = tax_relpos_bias_lookup(lowerCAmelCase_ , lowerCAmelCase_ , """decoder""" ).T
__lowercase : Optional[Any] = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__lowercase : Dict = old["""decoder/logits_dense/kernel"""].T
return new
def snake_case_ ( lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : bool ):
__lowercase : Any = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__lowercase : int = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__lowercase : Optional[Any] = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
__lowercase : Dict = state_dict["""shared.weight"""]
return state_dict
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any ):
__lowercase : List[Any] = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
__lowercase : Tuple = convert_tax_to_pytorch(
lowerCAmelCase_ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase_ , scalable_attention=lowerCAmelCase_ )
__lowercase : int = make_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : bool = False , ):
__lowercase : Union[str, Any] = MTaConfig.from_json_file(lowerCAmelCase_ )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__lowercase : Tuple = UMTaEncoderModel(lowerCAmelCase_ )
else:
__lowercase : Union[str, Any] = UMTaForConditionalGeneration(lowerCAmelCase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(lowerCAmelCase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase_ )
print("""Done""" )
if __name__ == "__main__":
lowerCamelCase : Optional[int] = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
        '''--is_encoder_only''', action='''store_true''', help='''Whether the model is an encoder-only model.''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
        help='''Whether the model uses scalable attention (umt5 model)''',
default=False,
)
lowerCamelCase : List[str] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
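# Example invocation (hypothetical script name and paths):
#   python convert_umt5_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --scalable_attention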
| 357
|
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : str ):
__lowercase : Tuple = s.rsplit(lowerCAmelCase_ , lowerCAmelCase_ )
return new.join(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : List[Any] ):
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def snake_case_ ( lowerCAmelCase_ : int ):
__lowercase : List[str] = {}
__lowercase : Tuple = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
__lowercase : List[str] = key.replace(F"{group_key}." , F"{group_key}.group." )
if "res_path" in key:
__lowercase : List[Any] = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
__lowercase : Union[str, Any] = rreplace(lowerCAmelCase_ , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
__lowercase : Tuple = rreplace(lowerCAmelCase_ , """.b""" , """.bias""" , 1 )
__lowercase : Dict = value.float()
return upgrade
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Tuple=True ):
from dall_e import Encoder
__lowercase : Any = Encoder()
if os.path.exists(lowerCAmelCase_ ):
__lowercase : List[Any] = torch.load(lowerCAmelCase_ )
else:
__lowercase : List[Any] = torch.hub.load_state_dict_from_url(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : int = ckpt.state_dict()
encoder.load_state_dict(lowerCAmelCase_ )
if config_path is not None:
__lowercase : Optional[int] = FlavaImageCodebookConfig.from_pretrained(lowerCAmelCase_ )
else:
__lowercase : List[str] = FlavaImageCodebookConfig()
__lowercase : Optional[Any] = FlavaImageCodebook(lowerCAmelCase_ ).eval()
__lowercase : List[Any] = encoder.state_dict()
__lowercase : Union[str, Any] = upgrade_state_dict(lowerCAmelCase_ )
hf_model.load_state_dict(lowerCAmelCase_ )
__lowercase : Dict = hf_model.state_dict()
__lowercase : Tuple = count_parameters(lowerCAmelCase_ )
__lowercase : Tuple = count_parameters(lowerCAmelCase_ )
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 )
if save_checkpoint:
hf_model.save_pretrained(lowerCAmelCase_ )
else:
return hf_state_dict
if __name__ == "__main__":
lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
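# Example invocation (hypothetical script name and paths):
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path /path/to/dalle_encoder.pkl \
#       --pytorch_dump_folder_path /path/to/output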
| 306
| 0
|
from PIL import Image
def snake_case_ ( lowerCAmelCase_ : Image , lowerCAmelCase_ : float ):
def brightness(lowerCAmelCase_ : int ) -> float:
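        # shifts every channel value c by `level`; Image.point() below applies
        # this via a 256-entry lookup table (one call per possible channel value)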
return 128 + level + (c - 128)
if not -255.0 <= level <= 255.0:
raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" )
return img.point(lowerCAmelCase_ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change brightness to 100
lowerCamelCase : List[str] = change_brightness(img, 1_00)
brigt_img.save('''image_data/lena_brightness.png''', format='''png''')
| 358
|
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
if "xprophetnet" in prophetnet_checkpoint_path:
__lowercase : List[str] = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
__lowercase , __lowercase : int = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
else:
__lowercase : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase_ )
__lowercase , __lowercase : Optional[Any] = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase_ , output_loading_info=lowerCAmelCase_ )
__lowercase : List[str] = ["""key_proj""", """value_proj""", """query_proj"""]
__lowercase : Optional[int] = {
"""self_attn""": """ngram_self_attn""",
"""cross_attn""": """encoder_attn""",
"""cross_attn_layer_norm""": """encoder_attn_layer_norm""",
"""feed_forward_layer_norm""": """final_layer_norm""",
"""feed_forward""": """""",
"""intermediate""": """fc1""",
"""output""": """fc2""",
"""key_proj""": """k_proj""",
"""query_proj""": """q_proj""",
"""value_proj""": """v_proj""",
"""word_embeddings""": """embed_tokens""",
"""embeddings_layer_norm""": """emb_layer_norm""",
"""relative_pos_embeddings""": """relative_linear""",
"""ngram_embeddings""": """ngram_input_embed""",
"""position_embeddings""": """embed_positions""",
}
for key in loading_info["missing_keys"]:
__lowercase : Tuple = key.split(""".""" )
if attributes[0] == "lm_head":
__lowercase : str = prophet
__lowercase : List[str] = prophet_old
else:
__lowercase : Tuple = prophet.prophetnet
__lowercase : Union[str, Any] = prophet_old.model
__lowercase : Optional[Any] = False
for attribute in attributes:
if attribute in mapping:
__lowercase : Optional[int] = mapping[attribute]
if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0:
__lowercase : str = attribute
elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
__lowercase : List[Any] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowercase : Any = old_model.weight
logger.info(F"{attribute} is initialized." )
__lowercase : Any = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowercase : Dict = old_model.bias
logger.info(F"{attribute} is initialized" )
__lowercase : int = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase_ , """in_proj_weight""" ):
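                # the old checkpoints fuse Q/K/V into a single in_proj_weight of
                # shape (3 * embed_dim, embed_dim); the slices below split it back
                # into the separate query/key/value projections of the new model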
__lowercase : Dict = old_model.in_proj_weight.shape[0] // 3
__lowercase : Tuple = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowercase : Union[str, Any] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowercase : int = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowercase : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowercase : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowercase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowercase : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowercase : int = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__lowercase : Optional[Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
__lowercase : int = True
break
if attribute.isdigit():
__lowercase : Tuple = model[int(lowerCAmelCase_ )]
__lowercase : int = old_model[int(lowerCAmelCase_ )]
else:
__lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if old_attribute == "":
__lowercase : int = old_model
else:
if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError(F"{old_model} does not have {old_attribute}" )
__lowercase : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if not is_key_init:
raise ValueError(F"{key} was not correctly initialized!" )
print(F"Saving model to {pytorch_dump_folder_path}" )
prophet.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCamelCase : Any = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 306
| 0
|
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[Any] , __a : Optional[Any] , __a : List[str]=99 , __a : str=13 , __a : Dict=7 , __a : List[str]=9 , __a : Union[str, Any]=True , __a : Any=True , __a : str=False , __a : str=32 , __a : Dict=5 , __a : str=4 , __a : Optional[Any]=37 , __a : List[str]=8 , __a : int=0.1 , __a : str=0.002 , __a : Optional[int]=1 , __a : Dict=0 , __a : Optional[Any]=0 , __a : List[str]=None , __a : Tuple=None , ) -> Optional[Any]:
"""simple docstring"""
__lowercase : List[str] = parent
__lowercase : Dict = batch_size
__lowercase : str = encoder_seq_length
__lowercase : int = decoder_seq_length
# For common tests
__lowercase : Dict = self.decoder_seq_length
__lowercase : str = is_training
__lowercase : Union[str, Any] = use_attention_mask
__lowercase : Optional[Any] = use_labels
__lowercase : List[Any] = vocab_size
__lowercase : int = hidden_size
__lowercase : Optional[int] = num_hidden_layers
__lowercase : Union[str, Any] = num_attention_heads
__lowercase : str = d_ff
__lowercase : Tuple = relative_attention_num_buckets
__lowercase : Optional[int] = dropout_rate
__lowercase : str = initializer_factor
__lowercase : str = eos_token_id
__lowercase : Dict = pad_token_id
__lowercase : Union[str, Any] = decoder_start_token_id
__lowercase : Optional[Any] = None
__lowercase : str = decoder_layers
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return TaConfig.from_pretrained("""google/umt5-base""" )
def lowerCAmelCase ( self : Dict , __a : Any , __a : Optional[Any] , __a : Optional[int] , __a : Optional[int]=None , __a : Dict=None , __a : List[Any]=None , __a : int=None , __a : Any=None , ) -> Optional[int]:
"""simple docstring"""
if attention_mask is None:
__lowercase : Optional[int] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
__lowercase : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
__lowercase : str = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__a )
if decoder_head_mask is None:
__lowercase : Any = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__a )
if cross_attn_head_mask is None:
__lowercase : str = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__a )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase : str = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
__lowercase : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
__lowercase : Any = input_ids.clamp(self.pad_token_id + 1 )
__lowercase : str = decoder_input_ids.clamp(self.pad_token_id + 1 )
__lowercase : Union[str, Any] = self.get_config()
__lowercase : Optional[int] = config.num_attention_heads
__lowercase : List[Any] = self.prepare_inputs_dict(__a , __a , __a )
return config, input_dict
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : str = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCAmelCase ( self : Tuple , __a : List[str] , __a : List[Any] , __a : Any , __a : Any , __a : Tuple , __a : List[str] , ) -> str:
"""simple docstring"""
__lowercase : Dict = UMTaModel(config=__a )
model.to(__a )
model.eval()
__lowercase : Any = model(
input_ids=__a , decoder_input_ids=__a , attention_mask=__a , decoder_attention_mask=__a , )
__lowercase : Any = model(input_ids=__a , decoder_input_ids=__a )
__lowercase : Optional[int] = result.last_hidden_state
__lowercase : Tuple = result.past_key_values
__lowercase : str = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__a ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCAmelCase ( self : Dict , __a : Tuple , __a : Any , __a : str , __a : List[Any] , __a : str , __a : List[str] , ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = UMTaModel(config=__a ).get_decoder().to(__a ).eval()
# first forward pass
__lowercase : List[Any] = model(__a , use_cache=__a )
__lowercase : Dict = model(__a )
__lowercase : List[Any] = model(__a , use_cache=__a )
self.parent.assertTrue(len(__a ) == len(__a ) )
self.parent.assertTrue(len(__a ) == len(__a ) + 1 )
__lowercase : Union[str, Any] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
__lowercase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append the next tokens to input_ids
__lowercase : int = torch.cat([input_ids, next_tokens] , dim=-1 )
__lowercase : Dict = model(__a )["""last_hidden_state"""]
__lowercase : List[str] = model(__a , past_key_values=__a )["""last_hidden_state"""]
# select random slice
__lowercase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__lowercase : Optional[Any] = output_from_no_past[:, -1, random_slice_idx].detach()
__lowercase : List[str] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1E-3 ) )
def lowerCAmelCase ( self : Tuple , __a : Dict , __a : Tuple , ) -> List[str]:
"""simple docstring"""
__lowercase : Dict = UMTaModel(config=__a ).to(__a ).half().eval()
__lowercase : Tuple = model(**__a )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__a ).any().item() )
@require_torch
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : int = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_A : Union[str, Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
_A : Tuple = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_A : str = True
_A : Tuple = False
_A : List[Any] = False
_A : List[Any] = True
_A : str = True
# The small UMT5 model needs higher percentages for CPU/MP tests
_A : str = [0.8, 0.9]
def lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase : Optional[Any] = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs()
__lowercase : List[str] = UMTaModel(config_and_inputs[0] ).to(__a )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__a , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"{tmpdirname}/t5_test.onnx" , export_params=__a , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
__lowercase : List[str] = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
__lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
__lowercase : Any = config_and_inputs[0]
__lowercase : Tuple = UMTaForConditionalGeneration(__a ).eval()
model.to(__a )
__lowercase : Dict = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__a ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__a ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__a ),
}
for attn_name, (name, mask) in zip(__a , head_masking.items() ):
__lowercase : Dict = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
__lowercase : Tuple = torch.ones(
config.num_decoder_layers , config.num_heads , device=__a )
__lowercase : int = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__a , return_dict_in_generate=__a , **__a , )
# We check the state of decoder_attentions and cross_attentions just from the last step
__lowercase : Union[str, Any] = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : int = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__a ).to(__a )
__lowercase : List[Any] = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__a , legacy=__a )
__lowercase : List[str] = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
__lowercase : Dict = tokenizer(__a , return_tensors="""pt""" , padding=__a ).input_ids
# fmt: off
        __lowercase : Tuple = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ] )
# fmt: on
torch.testing.assert_allclose(__a , __a )
__lowercase : Union[str, Any] = model.generate(input_ids.to(__a ) )
__lowercase : Optional[int] = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
__lowercase : int = tokenizer.batch_decode(__a )
self.assertEqual(__a , __a )
| 359
|
def snake_case_ ( lowerCAmelCase_ : int = 200 ):
__lowercase : List[str] = [1, 2, 5, 10, 20, 50, 100, 200]
__lowercase : List[str] = [0] * (pence + 1)
__lowercase : Optional[Any] = 1 # base case: 1 way to make 0 pence
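    # iterating over coins in the outer loop counts each combination of coins
    # exactly once, regardless of the order in which they are added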
for coin in coins:
for i in range(lowerCAmelCase_ , pence + 1 , 1 ):
number_of_ways[i] += number_of_ways[i - coin]
return number_of_ways[pence]
if __name__ == "__main__":
assert solution(2_00) == 7_36_82
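    # sanity check on a small case: 5 pence = 5 = 2+2+1 = 2+1+1+1 = 1+1+1+1+1
    assert solution(5) == 4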
| 306
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCamelCase : Tuple = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[int] = ['''audio_values''', '''audio_mask''']
def __init__( self : List[Any] , __a : Union[str, Any]=2048 , __a : Dict=1 , __a : Any=[16, 16] , __a : Union[str, Any]=128 , __a : Optional[Any]=44100 , __a : int=86 , __a : Any=2048 , __a : int=0.0 , **__a : Any , ) -> Tuple:
"""simple docstring"""
super().__init__(
feature_size=__a , sampling_rate=__a , padding_value=__a , **__a , )
__lowercase : Any = spectrogram_length
__lowercase : Tuple = num_channels
__lowercase : Dict = patch_size
__lowercase : Any = feature_size // self.patch_size[1]
__lowercase : Optional[int] = n_fft
__lowercase : int = sampling_rate // hop_length_to_sampling_rate
__lowercase : Optional[Any] = sampling_rate
__lowercase : Dict = padding_value
__lowercase : int = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__a , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=__a , norm="""slaney""" , mel_scale="""slaney""" , ).T
def lowerCAmelCase ( self : Union[str, Any] , __a : np.array ) -> np.ndarray:
"""simple docstring"""
__lowercase : List[Any] = spectrogram(
__a , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
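        # normalize the log-mel spectrogram: drop the last frame, shift by -20 dB,
        # then rescale the 80 dB dynamic range into [-1.0, 1.0]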
__lowercase : Union[str, Any] = log_spec[:, :-1]
__lowercase : Optional[int] = log_spec - 20.0
__lowercase : List[Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : Tuple , __a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __a : Optional[Union[str, TensorType]] = None , __a : Optional[bool] = True , __a : Optional[int] = None , __a : bool = False , __a : bool = False , **__a : Optional[int] , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__lowercase : Dict = isinstance(__a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}" )
__lowercase : List[str] = is_batched_numpy or (
isinstance(__a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__lowercase : Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__a , np.ndarray ):
__lowercase : int = np.asarray(__a , dtype=np.floataa )
elif isinstance(__a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__lowercase : List[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__lowercase : str = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__lowercase : List[str] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , __a ):
__lowercase : Optional[Any] = [np.asarray(__a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__lowercase : Dict = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__lowercase : List[Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__lowercase : List[Any] = np.array(__a ).astype(np.floataa )
# convert into correct format for padding
__lowercase : List[Any] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__lowercase : Optional[int] = np.ones([len(__a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__lowercase : List[str] = padded_audio_features * self.padding_value
for i in range(len(__a ) ):
__lowercase : Optional[Any] = audio_features[i]
__lowercase : Optional[int] = feature
# return as BatchFeature
if return_attention_mask:
__lowercase : Any = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
__lowercase : Optional[Any] = {"""audio_values""": padded_audio_features}
__lowercase : Dict = BatchFeature(data=__a , tensor_type=__a )
return encoded_inputs
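# A minimal usage sketch (hypothetical names): assuming `extractor` is an
# instance of this feature extractor and `audio` is a 1-D float numpy array
# sampled at 44100 Hz:
#
#   batch = extractor(audio, sampling_rate=44100, return_tensors="np")
#   batch["audio_values"].shape  # (1, 1, max_time_len, 128) with the defaults above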
| 360
|
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , __a : Dict , __a : List[str]=None , __a : Optional[Any]=None , __a : Union[str, Any]=None , __a : int="resnet50" , __a : List[str]=3 , __a : Tuple=32 , __a : Dict=3 , __a : List[str]=True , __a : Union[str, Any]=True , ) -> Any:
"""simple docstring"""
__lowercase : Optional[int] = parent
__lowercase : List[str] = out_indices if out_indices is not None else [4]
__lowercase : Optional[int] = stage_names
__lowercase : Any = out_features
__lowercase : Optional[Any] = backbone
__lowercase : Optional[Any] = batch_size
__lowercase : Union[str, Any] = image_size
__lowercase : List[str] = num_channels
__lowercase : str = use_pretrained_backbone
__lowercase : str = is_training
def lowerCAmelCase ( self : Dict ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase : str = self.get_config()
return config, pixel_values
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def lowerCAmelCase ( self : Optional[int] , __a : Dict , __a : Any ) -> Dict:
"""simple docstring"""
__lowercase : Dict = TimmBackbone(config=__a )
model.to(__a )
model.eval()
with torch.no_grad():
__lowercase : Optional[Any] = model(__a )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
__lowercase : Union[str, Any] = self.prepare_config_and_inputs()
__lowercase , __lowercase : str = config_and_inputs
__lowercase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : List[Any] = (TimmBackbone,) if is_torch_available() else ()
_A : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
_A : List[Any] = False
_A : List[str] = False
_A : Any = False
_A : Optional[Any] = False
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = TimmBackboneModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=__a , has_text_modality=__a )
def lowerCAmelCase ( self : Any ) -> str:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase ( self : str ) -> Tuple:
"""simple docstring"""
__lowercase : Tuple = """resnet18"""
__lowercase : Optional[int] = """microsoft/resnet-18"""
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a )
__lowercase : Dict = AutoBackbone.from_pretrained(__a )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
__lowercase : Union[str, Any] = AutoBackbone.from_pretrained(__a , use_timm_backbone=__a , out_indices=[1, 2, 3] )
__lowercase : Optional[Any] = AutoBackbone.from_pretrained(__a , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def lowerCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def lowerCAmelCase ( self : Any ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : Optional[Any] = model_class(__a )
__lowercase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase : List[str] = [*signature.parameters.keys()]
__lowercase : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase , __lowercase : int = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase : Optional[Any] = True
__lowercase : Union[str, Any] = self.has_attentions
# no need to test all models as different heads yield the same functionality
__lowercase : Union[str, Any] = self.all_model_classes[0]
__lowercase : List[Any] = model_class(__a )
model.to(__a )
__lowercase : Optional[Any] = self._prepare_for_class(__a , __a )
__lowercase : Union[str, Any] = model(**__a )
__lowercase : Optional[int] = outputs[0][-1]
# Encoder-/Decoder-only models
__lowercase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
__lowercase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=__a )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase : List[str] = model_class(__a )
model.to(__a )
model.eval()
__lowercase : int = model(**__a )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
__lowercase : Any = copy.deepcopy(__a )
__lowercase : Dict = None
__lowercase : Tuple = model_class(__a )
model.to(__a )
model.eval()
__lowercase : Optional[int] = model(**__a )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
__lowercase : List[str] = copy.deepcopy(__a )
__lowercase : Optional[Any] = False
__lowercase : str = model_class(__a )
model.to(__a )
model.eval()
__lowercase : List[Any] = model(**__a )
| 306
| 0
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str ):
__lowercase : Optional[Any] = list(lowerCAmelCase_ )
__lowercase : str = list(lowerCAmelCase_ )
__lowercase : Union[str, Any] = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count += 1
__lowercase : Dict = """_"""
if count > 1:
return False
else:
return "".join(lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : list[str] ):
__lowercase : Tuple = []
while True:
__lowercase : Union[str, Any] = ["""$"""] * len(lowerCAmelCase_ )
__lowercase : int = []
for i in range(len(lowerCAmelCase_ ) ):
for j in range(i + 1 , len(lowerCAmelCase_ ) ):
__lowercase : int = compare_string(binary[i] , binary[j] )
if k is False:
__lowercase : Union[str, Any] = """*"""
__lowercase : Tuple = """*"""
temp.append("""X""" )
for i in range(len(lowerCAmelCase_ ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(lowerCAmelCase_ ) == 0:
return pi
__lowercase : Union[str, Any] = list(set(lowerCAmelCase_ ) )
def snake_case_ ( lowerCAmelCase_ : int , lowerCAmelCase_ : Sequence[float] ):
__lowercase : List[Any] = []
for minterm in minterms:
__lowercase : Any = """"""
for _ in range(lowerCAmelCase_ ):
__lowercase : int = str(minterm % 2 ) + string
minterm //= 2
temp.append(lowerCAmelCase_ )
return temp
def snake_case_ ( lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
__lowercase : Tuple = list(lowerCAmelCase_ )
__lowercase : List[str] = list(lowerCAmelCase_ )
__lowercase : Dict = 0
for i in range(len(lowerCAmelCase_ ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def snake_case_ ( lowerCAmelCase_ : list[list[int]] , lowerCAmelCase_ : list[str] ):
__lowercase : Dict = []
__lowercase : List[str] = [0] * len(lowerCAmelCase_ )
for i in range(len(chart[0] ) ):
__lowercase : Optional[int] = 0
__lowercase : Union[str, Any] = -1
for j in range(len(lowerCAmelCase_ ) ):
if chart[j][i] == 1:
count += 1
__lowercase : Optional[Any] = j
if count == 1:
__lowercase : Optional[int] = 1
for i in range(len(lowerCAmelCase_ ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(lowerCAmelCase_ ) ):
__lowercase : Tuple = 0
temp.append(prime_implicants[i] )
while True:
__lowercase : Dict = 0
__lowercase : List[str] = -1
__lowercase : Any = 0
for i in range(len(lowerCAmelCase_ ) ):
__lowercase : Optional[Any] = chart[i].count(1 )
if count_n > max_n:
__lowercase : List[Any] = count_n
__lowercase : Tuple = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(lowerCAmelCase_ ) ):
__lowercase : List[str] = 0
def snake_case_ ( lowerCAmelCase_ : list[str] , lowerCAmelCase_ : list[str] ):
__lowercase : Optional[Any] = [[0 for x in range(len(lowerCAmelCase_ ) )] for x in range(len(lowerCAmelCase_ ) )]
for i in range(len(lowerCAmelCase_ ) ):
__lowercase : Dict = prime_implicants[i].count("""_""" )
for j in range(len(lowerCAmelCase_ ) ):
if is_for_table(prime_implicants[i] , binary[j] , lowerCAmelCase_ ):
__lowercase : Union[str, Any] = 1
return chart
def snake_case_ ( ):
__lowercase : List[str] = int(input("""Enter the no. of variables\n""" ) )
__lowercase : Any = [
float(lowerCAmelCase_ )
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""" ).split()
]
__lowercase : Dict = decimal_to_binary(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Union[str, Any] = check(lowerCAmelCase_ )
print("""Prime Implicants are:""" )
print(lowerCAmelCase_ )
__lowercase : Any = prime_implicant_chart(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Dict = selection(lowerCAmelCase_ , lowerCAmelCase_ )
print("""Essential Prime Implicants are:""" )
print(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
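# Worked example: for 3 variables and minterms {1, 5, 7} (binary 001, 101, 111),
# the prime implicants are "_01" (covers 001 and 101) and "1_1" (covers 101 and
# 111); both are essential, since 001 and 111 are each covered by only one implicant.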
| 361
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : str = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
lowerCamelCase : Optional[Any] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
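# MAPPING translates fairseq parameter prefixes to their transformers
# counterparts; the "*" wildcard stands for the encoder layer index and is
# filled in while iterating over the fairseq state dict below.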
def snake_case_ ( lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : int , lowerCAmelCase_ : str , lowerCAmelCase_ : int ):
for attribute in key.split(""".""" ):
__lowercase : List[str] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if weight_type is not None:
__lowercase : Union[str, Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ ).shape
else:
__lowercase : Dict = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
__lowercase : Dict = value
elif weight_type == "weight_g":
__lowercase : Union[str, Any] = value
elif weight_type == "weight_v":
__lowercase : List[Any] = value
elif weight_type == "bias":
__lowercase : int = value
elif weight_type == "running_mean":
__lowercase : List[Any] = value
elif weight_type == "running_var":
__lowercase : int = value
elif weight_type == "num_batches_tracked":
__lowercase : int = value
elif weight_type == "inv_freq":
__lowercase : Optional[Any] = value
else:
__lowercase : Any = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def snake_case_ ( lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Union[str, Any] ):
__lowercase : str = []
__lowercase : Any = fairseq_model.state_dict()
__lowercase : List[str] = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
__lowercase : Optional[Any] = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , hf_model.config.feat_extract_norm == """group""" , )
__lowercase : List[str] = True
else:
for key, mapped_key in MAPPING.items():
__lowercase : Any = """wav2vec2_conformer.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__lowercase : Tuple = True
if "*" in mapped_key:
__lowercase : List[Any] = name.split(lowerCAmelCase_ )[0].split(""".""" )[-2]
__lowercase : Any = mapped_key.replace("""*""" , lowerCAmelCase_ )
if "pos_bias_u" in name:
__lowercase : Any = None
elif "pos_bias_v" in name:
__lowercase : Tuple = None
elif "weight_g" in name:
__lowercase : Union[str, Any] = """weight_g"""
elif "weight_v" in name:
__lowercase : Dict = """weight_v"""
elif "bias" in name:
__lowercase : Union[str, Any] = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowercase : str = """weight"""
elif "running_mean" in name:
__lowercase : str = """running_mean"""
elif "inv_freq" in name:
__lowercase : List[Any] = """inv_freq"""
elif "running_var" in name:
__lowercase : Any = """running_var"""
elif "num_batches_tracked" in name:
__lowercase : Any = """num_batches_tracked"""
else:
__lowercase : Optional[int] = None
set_recursively(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase_ )
logger.warning(F"Unused weights: {unused_weights}" )
def snake_case_ ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] ):
__lowercase : List[Any] = full_name.split("""conv_layers.""" )[-1]
__lowercase : int = name.split(""".""" )
__lowercase : Optional[Any] = int(items[0] )
__lowercase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
__lowercase : Union[str, Any] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
__lowercase : List[str] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
__lowercase : Union[str, Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
__lowercase : Dict = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCAmelCase_ )
@torch.no_grad()
def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Any=None , lowerCAmelCase_ : Dict=True ):
if config_path is not None:
__lowercase : List[Any] = WavaVecaConformerConfig.from_pretrained(lowerCAmelCase_ , hidden_act="""swish""" )
else:
__lowercase : List[Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
__lowercase : Tuple = """rotary"""
if is_finetuned:
if dict_path:
__lowercase : Any = Dictionary.load(lowerCAmelCase_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowercase : List[Any] = target_dict.pad_index
__lowercase : Optional[int] = target_dict.bos_index
__lowercase : List[Any] = target_dict.eos_index
__lowercase : List[str] = len(target_dict.symbols )
__lowercase : Union[str, Any] = os.path.join(lowerCAmelCase_ , """vocab.json""" )
if not os.path.isdir(lowerCAmelCase_ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(lowerCAmelCase_ ) )
return
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
__lowercase : Tuple = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowercase : int = 0
__lowercase : Any = 1
with open(lowerCAmelCase_ , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Dict = WavaVecaCTCTokenizer(
lowerCAmelCase_ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=lowerCAmelCase_ , )
__lowercase : List[Any] = True if config.feat_extract_norm == """layer""" else False
__lowercase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , )
__lowercase : Optional[int] = WavaVecaProcessor(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
__lowercase : Union[str, Any] = WavaVecaConformerForCTC(lowerCAmelCase_ )
else:
__lowercase : Optional[Any] = WavaVecaConformerForPreTraining(lowerCAmelCase_ )
if is_finetuned:
__lowercase , __lowercase , __lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__lowercase : List[Any] = argparse.Namespace(task="""audio_pretraining""" )
__lowercase : Optional[Any] = fairseq.tasks.setup_task(lowerCAmelCase_ )
__lowercase , __lowercase , __lowercase : List[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowerCAmelCase_ )
__lowercase : Dict = model[0].eval()
recursively_load_weights(lowerCAmelCase_ , lowerCAmelCase_ , not is_finetuned )
hf_wavavec.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowerCamelCase : Any = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
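# Example invocation (the script name and paths are hypothetical):
#   python convert_wav2vec2_conformer.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
#       --dict_path /path/to/dict.ltr.txt
# Pass --not_finetuned (and omit --dict_path) to convert a pretraining-only checkpoint.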
| 306
| 0
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
lowerCamelCase : str = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
class lowerCAmelCase ( __a ):
'''simple docstring'''
_A : Optional[Any] = '''maskformer'''
_A : Tuple = {'''hidden_size''': '''mask_feature_size'''}
_A : Optional[int] = ['''resnet''', '''swin''']
_A : str = ['''detr''']
def __init__( self : Any , __a : int = 256 , __a : int = 256 , __a : float = 0.1 , __a : bool = False , __a : Optional[Dict] = None , __a : Optional[Dict] = None , __a : float = 0.02 , __a : float = 1.0 , __a : float = 1.0 , __a : float = 1.0 , __a : float = 20.0 , __a : Optional[bool] = None , **__a : Union[str, Any] , ) -> Any:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
__lowercase : List[str] = SwinConfig(
image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
if isinstance(__a , __a ):
__lowercase : Any = backbone_config.pop("""model_type""" )
__lowercase : List[str] = CONFIG_MAPPING[backbone_model_type]
__lowercase : Optional[int] = config_class.from_dict(__a )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
F"Supported model types: {','.join(self.backbones_supported )}" )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
__lowercase : Optional[Any] = DetrConfig()
else:
# verify that the decoder is supported
__lowercase : Dict = (
decoder_config.pop("""model_type""" ) if isinstance(__a , __a ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F"Transformer Decoder {decoder_type} not supported, please use one of"
F" {','.join(self.decoders_supported )}" )
if isinstance(__a , __a ):
__lowercase : Dict = CONFIG_MAPPING[decoder_type]
__lowercase : Any = config_class.from_dict(__a )
__lowercase : List[str] = backbone_config
__lowercase : Optional[int] = decoder_config
# main feature dimension for the model
__lowercase : Dict = fpn_feature_size
__lowercase : Optional[int] = mask_feature_size
# initializer
__lowercase : str = init_std
__lowercase : int = init_xavier_std
# Hungarian matcher && loss
__lowercase : int = cross_entropy_weight
__lowercase : List[str] = dice_weight
__lowercase : Dict = mask_weight
__lowercase : Any = use_auxiliary_loss
__lowercase : str = no_object_weight
__lowercase : str = output_auxiliary_logits
__lowercase : List[Any] = self.decoder_config.encoder_attention_heads
__lowercase : List[Any] = self.decoder_config.num_hidden_layers
super().__init__(**__a )
@classmethod
def lowerCAmelCase ( cls : int , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> List[str]:
"""simple docstring"""
return cls(
backbone_config=__a , decoder_config=__a , **__a , )
def lowerCAmelCase ( self : Union[str, Any] ) -> Dict[str, any]:
"""simple docstring"""
__lowercase : str = copy.deepcopy(self.__dict__ )
__lowercase : Dict = self.backbone_config.to_dict()
__lowercase : int = self.decoder_config.to_dict()
__lowercase : Optional[int] = self.__class__.model_type
return output
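# Hedged usage sketch (assumes the transformers library is installed; upstream, the
# classmethod above is exposed as from_backbone_and_decoder_configs):
#   from transformers import MaskFormerConfig, SwinConfig, DetrConfig
#   config = MaskFormerConfig.from_backbone_and_decoder_configs(SwinConfig(), DetrConfig())
#   assert config.backbone_config.model_type == "swin"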
| 362
|
def snake_case_ ( string_a : str , string_b : str ):
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
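    # quick sanity check: "karolin" and "kathrin" differ at three positions
    print(snake_case_("""karolin""" , """kathrin""" ))  # expected: 3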
| 306
| 0
|
def snake_case_ ( x : str , y : str ):
    assert x is not None
    assert y is not None
    __lowercase : Any = len(x )
    __lowercase : Optional[Any] = len(y )
# declaring the array for storing the dp values
__lowercase : Any = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
__lowercase : str = 1 if x[i - 1] == y[j - 1] else 0
__lowercase : Dict = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
__lowercase : List[str] = """"""
    __lowercase , __lowercase : Dict = m, n
while i > 0 and j > 0:
__lowercase : List[str] = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__lowercase : List[Any] = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = '''AGGTAB'''
lowerCamelCase : Optional[Any] = '''GXTXAYB'''
lowerCamelCase : List[str] = 4
lowerCamelCase : Any = '''GTAB'''
lowerCamelCase : List[Any] = longest_common_subsequence(a, b)
print('''len =''', ln, ''', sub-sequence =''', subseq)
import doctest
doctest.testmod()
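    # illustrative check (hedged): "programming" vs "gaming" should yield (6, 'gaming')
    print(longest_common_subsequence("""programming""" , """gaming""" ))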
| 363
|
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def snake_case_ ( lowerCAmelCase_ : Tuple ):
if isinstance(lowerCAmelCase_ , collections.abc.Iterable ):
return x
return (x, x)
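# e.g. 224 -> (224, 224), while an iterable such as (224, 224) is returned unchanged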
@require_flax
class lowerCAmelCase :
'''simple docstring'''
def lowerCAmelCase ( self : Any , __a : Any , __a : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Tuple , __a : np.ndarray , __a : np.ndarray , __a : float ) -> List[Any]:
"""simple docstring"""
__lowercase : List[str] = np.abs((a - b) ).max()
self.assertLessEqual(__a , __a , F"Difference between torch and flax is {diff} (>= {tol})." )
def lowerCAmelCase ( self : Tuple , __a : int , __a : str , __a : Union[str, Any] , __a : Optional[Any] , __a : Optional[Any]=None , **__a : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : str = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Optional[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def lowerCAmelCase ( self : Optional[int] , __a : Optional[int] , __a : Dict , __a : Dict , __a : List[str] , __a : Optional[Any]=None , **__a : str ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[str] = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Any = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def lowerCAmelCase ( self : Tuple , __a : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Dict , __a : int=None , **__a : int ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : Tuple = self.get_vision_text_model(__a , __a )
__lowercase : Union[str, Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : List[str] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : List[Any] = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
__lowercase : int = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Tuple = model(input_ids=__a , pixel_values=__a , attention_mask=__a )
__lowercase : int = after_output[0]
            __lowercase : int = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(__a , 1E-3 )
def lowerCAmelCase ( self : List[Any] , __a : Any , __a : Tuple , __a : Optional[int] , __a : str , __a : Optional[Any]=None , **__a : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase , __lowercase : str = self.get_vision_text_model(__a , __a )
__lowercase : Optional[Any] = {"""vision_model""": vision_model, """text_model""": text_model}
__lowercase : Dict = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__a )
__lowercase : Union[str, Any] = model(
input_ids=__a , pixel_values=__a , attention_mask=__a , output_attentions=__a )
__lowercase : Optional[int] = output.vision_model_output.attentions
self.assertEqual(len(__a ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase : Optional[int] = to_atuple(vision_model.config.image_size )
__lowercase : List[str] = to_atuple(vision_model.config.patch_size )
__lowercase : Optional[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__lowercase : int = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__lowercase : Dict = output.text_model_output.attentions
self.assertEqual(len(__a ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def lowerCAmelCase ( self : Optional[int] , __a : List[str] , __a : List[Any] , __a : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pt_model.to(__a )
pt_model.eval()
# prepare inputs
__lowercase : Union[str, Any] = inputs_dict
__lowercase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__lowercase : Union[str, Any] = pt_model(**__a ).to_tuple()
__lowercase : Tuple = fx_model(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__a )
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained(__a , from_pt=__a )
__lowercase : Dict = fx_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__a , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__a )
__lowercase : str = VisionTextDualEncoderModel.from_pretrained(__a , from_flax=__a )
pt_model_loaded.to(__a )
pt_model_loaded.eval()
with torch.no_grad():
__lowercase : List[Any] = pt_model_loaded(**__a ).to_tuple()
self.assertEqual(len(__a ) , len(__a ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__a , pt_output_loaded.numpy() , 4E-2 )
def lowerCAmelCase ( self : Optional[int] , __a : List[Any] , __a : int , __a : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : str = VisionTextDualEncoderModel(__a )
__lowercase : Union[str, Any] = FlaxVisionTextDualEncoderModel(__a )
__lowercase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __a )
__lowercase : Any = fx_state
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : Any , __a : Any , __a : Dict , __a : Tuple ) -> str:
"""simple docstring"""
__lowercase : int = VisionTextDualEncoderConfig.from_vision_text_configs(__a , __a )
__lowercase : Union[str, Any] = VisionTextDualEncoderModel(__a )
__lowercase : Dict = FlaxVisionTextDualEncoderModel(__a )
__lowercase : Tuple = load_flax_weights_in_pytorch_model(__a , fx_model.params )
self.check_pt_flax_equivalence(__a , __a , __a )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__a )
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase : int = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__a )
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
self.check_save_load(**__a )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase : str = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__a )
@is_pt_flax_cross_test
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : Optional[Any] = self.prepare_config_and_inputs()
__lowercase : Optional[int] = config_inputs_dict.pop("""vision_config""" )
__lowercase : Optional[int] = config_inputs_dict.pop("""text_config""" )
__lowercase : Dict = config_inputs_dict
self.check_equivalence_pt_to_flax(__a , __a , __a )
self.check_equivalence_flax_to_pt(__a , __a , __a )
@slow
def lowerCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase , __lowercase : List[Any] = self.get_pretrained_model_and_inputs()
__lowercase : Dict = model_a(**__a )
__lowercase : Any = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__a )
__lowercase : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__a )
__lowercase : Optional[int] = model_a(**__a )
__lowercase : Tuple = after_outputs[0]
            __lowercase : Union[str, Any] = np.amax(np.abs(out_a - out_b ) )
self.assertLessEqual(__a , 1E-5 )
@require_flax
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : int = 13
__lowercase : Union[str, Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Dict = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : Tuple = random_attention_mask([batch_size, 4] )
__lowercase : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : Optional[Any] , __a : Union[str, Any] , __a : int ) -> Dict:
"""simple docstring"""
__lowercase : int = FlaxViTModel(__a )
__lowercase : List[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = FlaxViTModelTester(self )
__lowercase : str = FlaxBertModelTester(self )
__lowercase : List[str] = vit_model_tester.prepare_config_and_inputs()
__lowercase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Optional[int] = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Any = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-clip""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=__a , text_from_pt=__a , )
__lowercase : Tuple = 13
__lowercase : Optional[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__lowercase : Tuple = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__lowercase : List[Any] = random_attention_mask([batch_size, 4] )
__lowercase : int = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
def lowerCAmelCase ( self : str , __a : str , __a : Union[str, Any] ) -> Any:
"""simple docstring"""
__lowercase : Dict = FlaxCLIPVisionModel(__a )
__lowercase : Optional[Any] = FlaxBertModel(__a )
return vision_model, text_model
def lowerCAmelCase ( self : List[Any] ) -> List[str]:
"""simple docstring"""
__lowercase : List[Any] = FlaxCLIPVisionModelTester(self )
__lowercase : Optional[Any] = FlaxBertModelTester(self )
__lowercase : Any = clip_model_tester.prepare_config_and_inputs()
__lowercase : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
__lowercase , __lowercase : Dict = vision_config_and_inputs
__lowercase , __lowercase , __lowercase , __lowercase : Optional[int] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Any = FlaxVisionTextDualEncoderModel.from_pretrained("""clip-italian/clip-italian""" , logit_scale_init_value=1.0 )
__lowercase : int = VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" )
__lowercase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
__lowercase : Tuple = processor(
text=["""una foto di un gatto""", """una foto di un cane"""] , images=__a , padding=__a , return_tensors="""np""" )
__lowercase : Optional[int] = model(**__a )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__lowercase : Optional[Any] = np.array([[1.2284727, 0.3104122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __a , atol=1E-3 ) )
| 306
| 0
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , '''Tatoeba directory does not exist.''' )
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase : Union[str, Any] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=__a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.resolver.convert_models(["""heb-eng"""] )
@slow
def lowerCAmelCase ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase : Tuple = self.resolver.write_model_card("""opus-mt-he-en""" , dry_run=__a )
assert mmeta["long_pair"] == "heb-eng"
| 364
|
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
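# Hedged sketch of the guard pattern used throughout this module (the probe uses
# importlib directly; the package name is illustrative):
#   import importlib.util
#   def _is_available(package: str) -> bool:
#       return importlib.util.find_spec(package) is not None
#   try:
#       if not _is_available("torchsde"):
#           raise OptionalDependencyNotAvailable()
#   except OptionalDependencyNotAvailable:
#       ...  # fall back to dummy objects that raise a helpful error on use
#   else:
#       ...  # perform the real imports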
| 306
| 0
|
def snake_case_ ( input_a : int , input_b : int ):
    return int((input_a, input_b).count(1 ) != 0 )
def snake_case_ ( ):
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 365
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
lowerCamelCase : str = re.compile('''[^A-Za-z_0-9]''')
# parameters used in DuplicationIndex
lowerCamelCase : Union[str, Any] = 10
lowerCamelCase : List[str] = 256
def snake_case_ ( lowerCAmelCase_ : List[str] ):
if len(lowerCAmelCase_ ) < MIN_NUM_TOKENS:
return None
    __lowercase : Dict = MinHash(num_perm=NUM_PERM )
for token in set(lowerCAmelCase_ ):
min_hash.update(token.encode() )
return min_hash
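# Hedged sketch of the MinHash estimate this helper feeds (datasketch API; the
# token sets are illustrative):
#   m_a, m_b = MinHash(num_perm=NUM_PERM), MinHash(num_perm=NUM_PERM)
#   for t in {"def", "foo", "return", "1"}:
#       m_a.update(t.encode())
#   for t in {"def", "foo", "return", "2"}:
#       m_b.update(t.encode())
#   m_a.jaccard(m_b)  # approximates the true Jaccard similarity, 3 / 5 = 0.6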
def snake_case_ ( lowerCAmelCase_ : str ):
return {t for t in NON_ALPHA.split(lowerCAmelCase_ ) if len(t.strip() ) > 0}
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , *,
__a : float = 0.85 , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Optional[Any] = duplication_jaccard_threshold
__lowercase : Optional[Any] = NUM_PERM
__lowercase : List[Any] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
__lowercase : List[str] = defaultdict(__a )
def lowerCAmelCase ( self : str , __a : Tuple , __a : MinHash ) -> None:
"""simple docstring"""
__lowercase : List[Any] = self._index.query(__a )
if code_key in self._index.keys:
print(F"Duplicate key {code_key}" )
return
self._index.insert(__a , __a )
if len(__a ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(__a )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(__a )
def lowerCAmelCase ( self : Union[str, Any] ) -> List[List[Dict]]:
"""simple docstring"""
__lowercase : Dict = []
for base, duplicates in self._duplicate_clusters.items():
__lowercase : List[str] = [base] + list(__a )
# reformat the cluster to be a list of dict
__lowercase : Optional[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(__a )
return duplicate_clusters
def lowerCAmelCase ( self : Any , __a : int ) -> None:
"""simple docstring"""
__lowercase : Tuple = self.get_duplicate_clusters()
with open(__a , """w""" ) as f:
json.dump(__a , __a )
def snake_case_ ( lowerCAmelCase_ : str ):
__lowercase , __lowercase : Union[str, Any] = element
__lowercase : Optional[Any] = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def snake_case_ ( lowerCAmelCase_ : Type[Dataset] ):
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash , ThreadedIterator(lowerCAmelCase_ , max_queue_size=10000 ) , chunksize=100 , ):
if data is not None:
yield data
def snake_case_ ( lowerCAmelCase_ : Type[Dataset] , lowerCAmelCase_ : float ):
__lowercase : Dict = DuplicationIndex(duplication_jaccard_threshold=lowerCAmelCase_ )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(lowerCAmelCase_ ) ) , max_queue_size=100 ) ):
di.add(lowerCAmelCase_ , lowerCAmelCase_ )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def snake_case_ ( code_a : str , code_b : str ):
    __lowercase : List[str] = get_tokens(code_a )
    __lowercase : Dict = get_tokens(code_b )
    return len(tokens_a & tokens_b ) / len(tokens_a | tokens_b )
lowerCamelCase : List[str] = None
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : List[Any] ):
__lowercase : Union[str, Any] = []
for elementa in cluster:
__lowercase : Tuple = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
__lowercase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(lowerCAmelCase_ , lowerCAmelCase_ ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
__lowercase : Dict = 1
extremes.append(lowerCAmelCase_ )
return extremes
def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Tuple ):
global _shared_dataset
__lowercase : Tuple = dataset
__lowercase : Optional[int] = []
__lowercase : str = partial(_find_cluster_extremes_shared , jaccard_threshold=lowerCAmelCase_ )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
lowerCAmelCase_ , lowerCAmelCase_ , ) , total=len(lowerCAmelCase_ ) , ):
extremes_list.append(lowerCAmelCase_ )
return extremes_list
def snake_case_ ( lowerCAmelCase_ : Type[Dataset] , lowerCAmelCase_ : float = 0.85 ):
__lowercase : Optional[int] = make_duplicate_clusters(lowerCAmelCase_ , lowerCAmelCase_ )
__lowercase : Tuple = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
__lowercase : int = {}
__lowercase : Dict = find_extremes(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for extremes in extremes_clusters:
for element in extremes:
__lowercase : Optional[Any] = element
__lowercase : int = duplicate_indices - set(extreme_dict.keys() )
    __lowercase : int = dataset.filter(lambda x , idx : idx not in remove_indices , with_indices=True )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
__lowercase : List[str] = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
__lowercase : str = extreme_dict[element["""base_index"""]]["""copies"""]
print(F"Original dataset size: {len(lowerCAmelCase_ )}" )
print(F"Number of duplicate clusters: {len(lowerCAmelCase_ )}" )
print(F"Files in duplicate cluster: {len(lowerCAmelCase_ )}" )
print(F"Unique files in duplicate cluster: {len(lowerCAmelCase_ )}" )
print(F"Filtered dataset size: {len(lowerCAmelCase_ )}" )
return ds_filter, duplicate_clusters
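# Hedged end-to-end usage (dataset name and threshold are illustrative; upstream
# this entry point is named deduplicate_dataset):
#   from datasets import load_dataset
#   ds = load_dataset("codeparrot/codeparrot-clean-valid", split="train")
#   ds_filtered, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)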
| 306
| 0